Adding spotless support for subprojects under :test (#1464)

Signed-off-by: Sarat Vemulapalli <vemulapallisarat@gmail.com>
Sarat Vemulapalli 2021-10-29 13:20:39 -07:00 committed by GitHub
parent f4bdb94463
commit f6115ae160
239 changed files with 10044 additions and 6121 deletions
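In short: this change deletes the :test:fixtures:*, :test:framework, and :test:logger-usage paths from the build script's projectPathsToExclude list, so the Spotless Gradle plugin now formats those subprojects, and adds a matching checkstyle suppression for the test module so the two tools do not overlap. The rest of the diff is the resulting one-time mechanical reformat of the test framework: long signatures and call sites wrapped one argument per line, string concatenations rebroken with leading +, and whitespace normalized. Assuming the repository's standard Spotless setup, the formatting can be checked or reapplied with the plugin's stock tasks, ./gradlew spotlessCheck and ./gradlew spotlessApply.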

View File

@ -27,6 +27,8 @@
<suppress files="libs" checks="." />
<!-- Excludes checkstyle run on modules module -->
<suppress files="modules" checks="." />
<!-- Excludes checkstyle run on test module -->
<suppress files="test" checks="." />
<!-- Excludes checkstyle run on rest-api-spec module -->
<suppress files="rest-api-spec" checks="." />

View File

@ -77,16 +77,7 @@ def projectPathsToExclude = [
':plugins:repository-s3',
':plugins:store-smb',
':plugins:transport-nio',
':qa:die-with-dignity',
':test:fixtures:azure-fixture',
':test:fixtures:gcs-fixture',
':test:fixtures:hdfs-fixture',
':test:fixtures:krb5kdc-fixture',
':test:fixtures:minio-fixture',
':test:fixtures:old-elasticsearch',
':test:fixtures:s3-fixture',
':test:framework',
':test:logger-usage'
':qa:die-with-dignity'
]
subprojects {

View File

@ -37,8 +37,6 @@ import org.opensearch.action.ActionRequest;
import org.opensearch.action.ActionResponse;
import org.opensearch.common.CheckedConsumer;
import org.opensearch.tasks.Task;
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.action.support.TransportAction;
import static org.opensearch.action.support.PlainActionFuture.newFuture;
@ -46,8 +44,10 @@ public class ActionTestUtils {
private ActionTestUtils() { /* no construction */ }
public static <Request extends ActionRequest, Response extends ActionResponse>
Response executeBlocking(TransportAction<Request, Response> action, Request request) {
public static <Request extends ActionRequest, Response extends ActionResponse> Response executeBlocking(
TransportAction<Request, Response> action,
Request request
) {
PlainActionFuture<Response> future = newFuture();
action.execute(request, future);
return future.actionGet();
@ -58,14 +58,16 @@ public class ActionTestUtils {
*
* This is a shim method to make execution publicly available in tests.
*/
public static <Request extends ActionRequest, Response extends ActionResponse>
void execute(TransportAction<Request, Response> action, Task task, Request request, ActionListener<Response> listener) {
public static <Request extends ActionRequest, Response extends ActionResponse> void execute(
TransportAction<Request, Response> action,
Task task,
Request request,
ActionListener<Response> listener
) {
action.execute(task, request, listener);
}
public static <T> ActionListener<T> assertNoFailureListener(CheckedConsumer<T, Exception> consumer) {
return ActionListener.wrap(consumer, e -> {
throw new AssertionError(e);
});
return ActionListener.wrap(consumer, e -> { throw new AssertionError(e); });
}
}
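
An aside on the assertNoFailureListener helper reformatted above: it wraps a success consumer so that any failure is rethrown as an AssertionError. A minimal, dependency-free sketch of the same pattern, with a hypothetical Listener interface standing in for OpenSearch's ActionListener:

import java.util.function.Consumer;

// Sketch of the wrap-and-rethrow pattern behind assertNoFailureListener:
// the test supplies only a success handler; any failure becomes an
// AssertionError so it fails the test instead of being swallowed.
interface Listener<T> {
    void onResponse(T value);

    void onFailure(Exception e);
}

final class Listeners {
    static <T> Listener<T> assertNoFailure(Consumer<T> onResponse) {
        return new Listener<T>() {
            @Override
            public void onResponse(T value) {
                onResponse.accept(value); // delegate successes to the test
            }

            @Override
            public void onFailure(Exception e) {
                throw new AssertionError(e); // surface failures loudly
            }
        };
    }
}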

View File

@ -25,7 +25,6 @@
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
@ -81,8 +80,12 @@ public class ClusterStateCreationUtils {
* @param primaryState state of primary
* @param replicaStates states of the replicas. length of this array determines also the number of replicas
*/
public static ClusterState state(String index, boolean activePrimaryLocal, ShardRoutingState primaryState,
ShardRoutingState... replicaStates) {
public static ClusterState state(
String index,
boolean activePrimaryLocal,
ShardRoutingState primaryState,
ShardRoutingState... replicaStates
) {
final int numberOfReplicas = replicaStates.length;
int numberOfNodes = numberOfReplicas + 1;
@ -106,10 +109,15 @@ public class ClusterStateCreationUtils {
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
final int primaryTerm = 1 + randomInt(200);
IndexMetadata indexMetadata = IndexMetadata.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm)
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(
Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())
)
.primaryTerm(0, primaryTerm)
.build();
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
@ -135,8 +143,9 @@ public class ClusterStateCreationUtils {
} else {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, true,
primaryState, unassignedInfo));
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, true, primaryState, unassignedInfo)
);
for (ShardRoutingState replicaState : replicaStates) {
String replicaNode = null;
@ -152,22 +161,27 @@ public class ClusterStateCreationUtils {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState,
unassignedInfo));
TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState, unassignedInfo)
);
}
final IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build();
IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexMetadata);
indexMetadataBuilder.putInSyncAllocationIds(0,
indexShardRoutingTable.activeShards().stream().map(ShardRouting::allocationId).map(AllocationId::getId)
.collect(Collectors.toSet())
indexMetadataBuilder.putInSyncAllocationIds(
0,
indexShardRoutingTable.activeShards()
.stream()
.map(ShardRouting::allocationId)
.map(AllocationId::getId)
.collect(Collectors.toSet())
);
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);
state.metadata(Metadata.builder().put(indexMetadataBuilder.build(), false).generateClusterUuidIfNeeded());
state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(indexMetadata.getIndex())
.addIndexShard(indexShardRoutingTable)).build());
state.routingTable(
RoutingTable.builder().add(IndexRoutingTable.builder(indexMetadata.getIndex()).addIndexShard(indexShardRoutingTable)).build()
);
return state.build();
}
@ -185,17 +199,23 @@ public class ClusterStateCreationUtils {
}
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(randomFrom(nodes));
IndexMetadata indexMetadata = IndexMetadata.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries).put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(
Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())
)
.build();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex());
for (int i = 0; i < numberOfPrimaries; i++) {
ShardId shardId = new ShardId(indexMetadata.getIndex(), i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(shardId, randomFrom(nodes), true, ShardRoutingState.STARTED));
TestShardRouting.newShardRouting(shardId, randomFrom(nodes), true, ShardRoutingState.STARTED)
);
indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build());
}
@ -206,8 +226,6 @@ public class ClusterStateCreationUtils {
return state.build();
}
/**
* Creates cluster state with the given indices, each index containing #(numberOfPrimaries)
* started primary shards and no replicas. The cluster state contains #(numberOfNodes) nodes
@ -228,17 +246,23 @@ public class ClusterStateCreationUtils {
List<String> nodesList = new ArrayList<>(nodes);
int currentNodeToAssign = 0;
for (String index : indices) {
IndexMetadata indexMetadata = IndexMetadata.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries).put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(
Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())
)
.build();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex());
for (int i = 0; i < numberOfPrimaries; i++) {
ShardId shardId = new ShardId(indexMetadata.getIndex(), i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(shardId, nodesList.get(currentNodeToAssign++), true, ShardRoutingState.STARTED));
TestShardRouting.newShardRouting(shardId, nodesList.get(currentNodeToAssign++), true, ShardRoutingState.STARTED)
);
if (currentNodeToAssign == nodesList.size()) {
currentNodeToAssign = 0;
}
@ -268,10 +292,15 @@ public class ClusterStateCreationUtils {
}
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
IndexMetadata indexMetadata = IndexMetadata.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(
Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())
)
.build();
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);
state.metadata(Metadata.builder().put(indexMetadata, false).generateClusterUuidIfNeeded());
@ -279,17 +308,18 @@ public class ClusterStateCreationUtils {
for (int i = 0; i < numberOfShards; i++) {
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, true,
ShardRoutingState.STARTED));
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).getId(), null, false,
ShardRoutingState.STARTED));
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, true, ShardRoutingState.STARTED)
);
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, i, newNode(1).getId(), null, false, ShardRoutingState.STARTED)
);
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}
state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
return state.build();
}
/**
* Creates cluster state with several indexes, shards and replicas and all shards STARTED.
*/
@ -311,19 +341,26 @@ public class ClusterStateCreationUtils {
for (String index : indices) {
IndexMetadata indexMetadata = IndexMetadata.builder(index)
.settings(Settings.builder().put(SETTING_VERSION_CREATED, Version.CURRENT).put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).put(SETTING_CREATION_DATE, System.currentTimeMillis()))
.build();
.settings(
Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())
)
.build();
metadataBuilder.put(indexMetadata, false).generateClusterUuidIfNeeded();
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex());
for (int i = 0; i < numberOfShards; i++) {
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
indexShardRoutingBuilder
.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, true, ShardRoutingState.STARTED));
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, i, newNode(0).getId(), null, true, ShardRoutingState.STARTED)
);
for (int replica = 0; replica < numberOfReplicas; replica++) {
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(replica + 1).getId(), null, false,
ShardRoutingState.STARTED));
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, i, newNode(replica + 1).getId(), null, false, ShardRoutingState.STARTED)
);
}
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}
@ -357,8 +394,12 @@ public class ClusterStateCreationUtils {
* @param assignedReplicas number of replicas that should have INITIALIZING, STARTED or RELOCATING state
* @param unassignedReplicas number of replicas that should be unassigned
*/
public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal,
int assignedReplicas, int unassignedReplicas) {
public static ClusterState stateWithActivePrimary(
String index,
boolean activePrimaryLocal,
int assignedReplicas,
int unassignedReplicas
) {
ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
// no point in randomizing - node assignment later on does it too.
for (int i = 0; i < assignedReplicas; i++) {
@ -409,8 +450,13 @@ public class ClusterStateCreationUtils {
}
private static DiscoveryNode newNode(int nodeId) {
return new DiscoveryNode("node_" + nodeId, OpenSearchTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(),
new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), Version.CURRENT);
return new DiscoveryNode(
"node_" + nodeId,
OpenSearchTestCase.buildNewFakeTransportAddress(),
Collections.emptyMap(),
new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES),
Version.CURRENT
);
}
private static String selectAndRemove(Set<String> strings) {
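
A hedged usage sketch for the state(...) helper reformatted above, with argument meanings taken from its javadoc; the utility's package is assumed and may differ:

import org.opensearch.action.support.replication.ClusterStateCreationUtils; // package assumed
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.routing.ShardRoutingState;

public class ClusterStateFixtureExample {
    // One index with a started primary on the local node, one started
    // replica and one unassigned replica; the replica count is implied
    // by the length of the varargs array.
    static ClusterState exampleState() {
        return ClusterStateCreationUtils.state(
            "test-index",
            true, // activePrimaryLocal
            ShardRoutingState.STARTED, // primary state
            ShardRoutingState.STARTED, // replica 1
            ShardRoutingState.UNASSIGNED // replica 2
        );
    }
}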

View File

@ -36,17 +36,17 @@ import org.opensearch.action.support.WriteRequest;
import org.opensearch.common.Nullable;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.translog.Translog;
import org.opensearch.action.support.replication.TransportWriteAction;
import java.util.concurrent.CountDownLatch;
public abstract class TransportWriteActionTestHelper {
public static void performPostWriteActions(final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final Logger logger) {
public static void performPostWriteActions(
final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final Logger logger
) {
final CountDownLatch latch = new CountDownLatch(1);
TransportWriteAction.RespondingWriteResult writerResult = new TransportWriteAction.RespondingWriteResult() {
@Override

View File

@ -89,8 +89,9 @@ public class BootstrapForTesting {
static {
// make sure java.io.tmpdir exists always (in case code uses it in a static initializer)
Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
"please set ${java.io.tmpdir} in pom.xml"));
Path javaTmpDir = PathUtils.get(
Objects.requireNonNull(System.getProperty("java.io.tmpdir"), "please set ${java.io.tmpdir} in pom.xml")
);
try {
Security.ensureDirectoryExists(javaTmpDir);
} catch (Exception e) {
@ -98,8 +99,8 @@ public class BootstrapForTesting {
}
// just like bootstrap, initialize natives, then SM
final boolean memoryLock =
BootstrapSettings.MEMORY_LOCK_SETTING.get(Settings.EMPTY); // use the default bootstrap.memory_lock setting
final boolean memoryLock = BootstrapSettings.MEMORY_LOCK_SETTING.get(Settings.EMPTY); // use the default bootstrap.memory_lock
// setting
final boolean systemCallFilter = Booleans.parseBoolean(System.getProperty("tests.system_call_filter", "true"));
Bootstrap.initializeNatives(javaTmpDir, memoryLock, systemCallFilter, true);
@ -149,7 +150,7 @@ public class BootstrapForTesting {
// read test-framework permissions
Map<String, URL> codebases = Security.getCodebaseJarMap(JarHell.parseClassPath());
// when testing server, the main opensearch code is not yet in a jar, so we need to manually add it
addClassCodebase(codebases,"opensearch", "org.opensearch.plugins.PluginsService");
addClassCodebase(codebases, "opensearch", "org.opensearch.plugins.PluginsService");
if (System.getProperty("tests.gradle") == null) {
// intellij and eclipse don't package our internal libs, so we need to set the codebases for them manually
addClassCodebase(codebases, "plugin-classloader", "org.opensearch.plugins.ExtendedPluginsClassLoader");
@ -174,7 +175,8 @@ public class BootstrapForTesting {
// guarantee plugin classes are initialized first, in case they have one-time hacks.
// this just makes unit testing more realistic
for (URL url : Collections.list(
BootstrapForTesting.class.getClassLoader().getResources(PluginInfo.OPENSEARCH_PLUGIN_PROPERTIES))) {
BootstrapForTesting.class.getClassLoader().getResources(PluginInfo.OPENSEARCH_PLUGIN_PROPERTIES)
)) {
Properties properties = new Properties();
try (InputStream stream = FileSystemUtils.openFileURLStream(url)) {
properties.load(stream);
@ -211,16 +213,18 @@ public class BootstrapForTesting {
* like core, test-framework, etc. this way tests fail if accesscontroller blocks are missing.
*/
@SuppressForbidden(reason = "accesses fully qualified URLs to configure security")
static Map<String,Policy> getPluginPermissions() throws Exception {
List<URL> pluginPolicies =
Collections.list(BootstrapForTesting.class.getClassLoader().getResources(PluginInfo.OPENSEARCH_PLUGIN_POLICY));
static Map<String, Policy> getPluginPermissions() throws Exception {
List<URL> pluginPolicies = Collections.list(
BootstrapForTesting.class.getClassLoader().getResources(PluginInfo.OPENSEARCH_PLUGIN_POLICY)
);
if (pluginPolicies.isEmpty()) {
return Collections.emptyMap();
}
// compute classpath minus obvious places, all other jars will get the permission.
Set<URL> codebases = new HashSet<>(parseClassPathWithSymlinks());
Set<URL> excluded = new HashSet<>(Arrays.asList(
Set<URL> excluded = new HashSet<>(
Arrays.asList(
// es core
Bootstrap.class.getProtectionDomain().getCodeSource().getLocation(),
// es test framework
@ -231,7 +235,8 @@ public class BootstrapForTesting {
RandomizedRunner.class.getProtectionDomain().getCodeSource().getLocation(),
// junit library
Assert.class.getProtectionDomain().getCodeSource().getLocation()
));
)
);
codebases.removeAll(excluded);
// parse each policy file, with codebase substitution from the classpath
@ -241,7 +246,7 @@ public class BootstrapForTesting {
}
// consult each policy file for those codebases
Map<String,Policy> map = new HashMap<>();
Map<String, Policy> map = new HashMap<>();
for (URL url : codebases) {
map.put(url.getFile(), new Policy() {
@Override
@ -278,17 +283,16 @@ public class BootstrapForTesting {
}
return raw;
}
/**
* Collect host addresses of all local interfaces so we could check
* Collect host addresses of all local interfaces so we could check
* if the network connection is being made only on those.
* @return host names and addresses of all local interfaces
*/
private static Set<String> getTrustedHosts() {
//
//
try {
return Collections
.list(NetworkInterface.getNetworkInterfaces())
return Collections.list(NetworkInterface.getNetworkInterfaces())
.stream()
.flatMap(iface -> Collections.list(iface.getInetAddresses()).stream())
.map(address -> NetworkAddress.format(address))
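
The getTrustedHosts() pipeline above is unchanged in substance by the reformat; the same idea, as a self-contained JDK-only sketch (InetAddress.getHostAddress stands in for NetworkAddress.format):

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;

final class TrustedHosts {
    // Collect every address bound to a local interface, so a test can
    // check that connections are only made to local endpoints.
    static Set<String> collect() throws SocketException {
        return Collections.list(NetworkInterface.getNetworkInterfaces())
            .stream()
            .flatMap(iface -> Collections.list(iface.getInetAddresses()).stream())
            .map(InetAddress::getHostAddress)
            .collect(Collectors.toSet());
    }
}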

View File

@ -52,11 +52,12 @@ abstract class OpenSearchCliTestCase extends OpenSearchTestCase {
}
void runTest(
final int expectedStatus,
final boolean expectedInit,
final BiConsumer<String, String> outputConsumer,
final InitConsumer initConsumer,
final String... args) throws Exception {
final int expectedStatus,
final boolean expectedInit,
final BiConsumer<String, String> outputConsumer,
final InitConsumer initConsumer,
final String... args
) throws Exception {
final MockTerminal terminal = new MockTerminal();
final Path home = createTempDir();
try {
@ -65,10 +66,11 @@ abstract class OpenSearchCliTestCase extends OpenSearchTestCase {
@Override
protected Environment createEnv(final Map<String, String> settings) throws UserException {
Settings.Builder builder = Settings.builder().put("path.home", home);
settings.forEach((k,v) -> builder.put(k, v));
settings.forEach((k, v) -> builder.put(k, v));
final Settings realSettings = builder.build();
return new Environment(realSettings, home.resolve("config"));
}
@Override
void init(final boolean daemonize, final Path pidFile, final boolean quiet, Environment initialEnv) {
init.set(true);

View File

@ -32,8 +32,6 @@
package org.opensearch.cli;
import org.opensearch.cli.Terminal;
import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;

View File

@ -85,20 +85,25 @@ public final class DataStreamTestHelper {
}
public static String generateMapping(String timestampFieldName, String type) {
return "{\n" +
" \"_data_stream_timestamp\": {\n" +
" \"enabled\": true,\n" +
" \"timestamp_field\": { \"name\": \"" + timestampFieldName + "\" }" +
" }," +
" \"properties\": {\n" +
" \"" + timestampFieldName + "\": {\n" +
" \"type\": \"" + type + "\"\n" +
" }\n" +
" }\n" +
" }";
return "{\n"
+ " \"_data_stream_timestamp\": {\n"
+ " \"enabled\": true,\n"
+ " \"timestamp_field\": { \"name\": \""
+ timestampFieldName
+ "\" }"
+ " },"
+ " \"properties\": {\n"
+ " \""
+ timestampFieldName
+ "\": {\n"
+ " \"type\": \""
+ type
+ "\"\n"
+ " }\n"
+ " }\n"
+ " }";
}
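
For orientation, a call such as generateMapping("@timestamp", "date") (hypothetical arguments) evaluates, modulo the exact whitespace encoded above, to:

{
  "_data_stream_timestamp": {
    "enabled": true,
    "timestamp_field": { "name": "@timestamp" }
  },
  "properties": {
    "@timestamp": { "type": "date" }
  }
}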
public static List<Index> randomIndexInstances() {
int numIndices = OpenSearchTestCase.randomIntBetween(0, 128);
List<Index> indices = new ArrayList<>(numIndices);
@ -159,5 +164,4 @@ public final class DataStreamTestHelper {
return IndexMetadata.builder(name).settings(b).numberOfShards(1).numberOfReplicas(1).build();
}
}

View File

@ -60,8 +60,7 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService {
@Nullable // if no fakery should take place
private volatile BiFunction<DiscoveryNode, FsInfo.Path, FsInfo.Path> diskUsageFunction;
public MockInternalClusterInfoService(Settings settings, ClusterService clusterService,
ThreadPool threadPool, NodeClient client) {
public MockInternalClusterInfoService(Settings settings, ClusterService clusterService, ThreadPool threadPool, NodeClient client) {
super(settings, clusterService, threadPool, client);
}
@ -91,22 +90,44 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService {
return nodesStats.stream().map(nodeStats -> {
final DiscoveryNode discoveryNode = nodeStats.getNode();
final FsInfo oldFsInfo = nodeStats.getFs();
return new NodeStats(discoveryNode, nodeStats.getTimestamp(), nodeStats.getIndices(), nodeStats.getOs(),
nodeStats.getProcess(), nodeStats.getJvm(), nodeStats.getThreadPool(), new FsInfo(oldFsInfo.getTimestamp(),
oldFsInfo.getIoStats(),
StreamSupport.stream(oldFsInfo.spliterator(), false)
.map(fsInfoPath -> diskUsageFunction.apply(discoveryNode, fsInfoPath))
.toArray(FsInfo.Path[]::new)), nodeStats.getTransport(),
nodeStats.getHttp(), nodeStats.getBreaker(), nodeStats.getScriptStats(), nodeStats.getDiscoveryStats(),
nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats(), nodeStats.getScriptCacheStats(),
nodeStats.getIndexingPressureStats(), nodeStats.getShardIndexingPressureStats());
return new NodeStats(
discoveryNode,
nodeStats.getTimestamp(),
nodeStats.getIndices(),
nodeStats.getOs(),
nodeStats.getProcess(),
nodeStats.getJvm(),
nodeStats.getThreadPool(),
new FsInfo(
oldFsInfo.getTimestamp(),
oldFsInfo.getIoStats(),
StreamSupport.stream(oldFsInfo.spliterator(), false)
.map(fsInfoPath -> diskUsageFunction.apply(discoveryNode, fsInfoPath))
.toArray(FsInfo.Path[]::new)
),
nodeStats.getTransport(),
nodeStats.getHttp(),
nodeStats.getBreaker(),
nodeStats.getScriptStats(),
nodeStats.getDiscoveryStats(),
nodeStats.getIngestStats(),
nodeStats.getAdaptiveSelectionStats(),
nodeStats.getScriptCacheStats(),
nodeStats.getIndexingPressureStats(),
nodeStats.getShardIndexingPressureStats()
);
}).collect(Collectors.toList());
}
class SizeFakingClusterInfo extends ClusterInfo {
SizeFakingClusterInfo(ClusterInfo delegate) {
super(delegate.getNodeLeastAvailableDiskUsages(), delegate.getNodeMostAvailableDiskUsages(),
delegate.shardSizes, delegate.routingToDataPath, delegate.reservedSpace);
super(
delegate.getNodeLeastAvailableDiskUsages(),
delegate.getNodeMostAvailableDiskUsages(),
delegate.shardSizes,
delegate.routingToDataPath,
delegate.reservedSpace
);
}
@Override
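
The volatile diskUsageFunction hook reformatted above is a handy test pattern: stats pass through untouched until a test installs an override. A dependency-free sketch, with generic parameters standing in for DiscoveryNode and FsInfo.Path:

import java.util.function.BiFunction;

final class DiskUsageFaker<N, P> {
    // null means "no fakery should take place": real values pass through.
    private volatile BiFunction<N, P, P> override;

    void setOverride(BiFunction<N, P, P> fn) {
        this.override = fn;
    }

    P apply(N node, P realValue) {
        final BiFunction<N, P, P> fn = override; // read the volatile once
        return fn == null ? realValue : fn.apply(node, realValue);
    }
}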

View File

@ -70,17 +70,22 @@ import static java.util.Collections.emptyMap;
import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING;
public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
private static final ClusterSettings EMPTY_CLUSTER_SETTINGS =
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
private static final ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings(
Settings.EMPTY,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS
);
public static final SnapshotsInfoService SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES = () ->
new SnapshotShardSizeInfo(ImmutableOpenMap.of()) {
@Override
public Long getShardSize(ShardRouting shardRouting) {
assert shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT :
"Expecting a recovery source of type [SNAPSHOT] but got [" + shardRouting.recoverySource().getType() + ']';
throw new UnsupportedOperationException();
}
public static final SnapshotsInfoService SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES = () -> new SnapshotShardSizeInfo(
ImmutableOpenMap.of()
) {
@Override
public Long getShardSize(ShardRouting shardRouting) {
assert shardRouting.recoverySource()
.getType() == RecoverySource.Type.SNAPSHOT : "Expecting a recovery source of type [SNAPSHOT] but got ["
+ shardRouting.recoverySource().getType()
+ ']';
throw new UnsupportedOperationException();
}
};
public static MockAllocationService createAllocationService() {
@ -97,16 +102,22 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
public static MockAllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings, Random random) {
return new MockAllocationService(
randomAllocationDeciders(settings, clusterSettings, random),
new TestGatewayAllocator(), new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE,
SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES);
randomAllocationDeciders(settings, clusterSettings, random),
new TestGatewayAllocator(),
new BalancedShardsAllocator(settings),
EmptyClusterInfoService.INSTANCE,
SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES
);
}
public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) {
return new MockAllocationService(
randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
new TestGatewayAllocator(), new BalancedShardsAllocator(settings), clusterInfoService,
SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES);
randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
new TestGatewayAllocator(),
new BalancedShardsAllocator(settings),
clusterInfoService,
SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES
);
}
public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator gatewayAllocator) {
@ -124,18 +135,24 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
) {
return new MockAllocationService(
randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
gatewayAllocator, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE, snapshotsInfoService);
gatewayAllocator,
new BalancedShardsAllocator(settings),
EmptyClusterInfoService.INSTANCE,
snapshotsInfoService
);
}
public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettings clusterSettings, Random random) {
List<AllocationDecider> deciders = new ArrayList<>(
ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList()));
ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList())
);
Collections.shuffle(deciders, random);
return new AllocationDeciders(deciders);
}
protected static Set<DiscoveryNodeRole> MASTER_DATA_ROLES =
Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE)));
protected static Set<DiscoveryNodeRole> MASTER_DATA_ROLES = Collections.unmodifiableSet(
new HashSet<>(Arrays.asList(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE))
);
protected static DiscoveryNode newNode(String nodeId) {
return newNode(nodeId, Version.CURRENT);
@ -157,7 +174,7 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, version);
}
protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {
protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {
List<ShardRouting> initializingShards = clusterState.getRoutingNodes().shardsWithState(INITIALIZING);
if (initializingShards.isEmpty()) {
return clusterState;
@ -166,10 +183,15 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
}
protected static AllocationDeciders yesAllocationDeciders() {
return new AllocationDeciders(Arrays.asList(
new TestAllocateDecision(Decision.YES),
new SameShardAllocationDecider(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))));
return new AllocationDeciders(
Arrays.asList(
new TestAllocateDecision(Decision.YES),
new SameShardAllocationDecider(
Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
)
)
);
}
protected static AllocationDeciders noAllocationDeciders() {
@ -177,10 +199,15 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
}
protected static AllocationDeciders throttleAllocationDeciders() {
return new AllocationDeciders(Arrays.asList(
new TestAllocateDecision(Decision.THROTTLE),
new SameShardAllocationDecider(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))));
return new AllocationDeciders(
Arrays.asList(
new TestAllocateDecision(Decision.THROTTLE),
new SameShardAllocationDecider(
Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
)
)
);
}
protected ClusterState applyStartedShardsUntilNoChange(ClusterState clusterState, AllocationService service) {
@ -207,9 +234,11 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
*
* @return the cluster state after completing the reroute.
*/
public static ClusterState startInitializingShardsAndReroute(AllocationService allocationService,
ClusterState clusterState,
RoutingNode routingNode) {
public static ClusterState startInitializingShardsAndReroute(
AllocationService allocationService,
ClusterState clusterState,
RoutingNode routingNode
) {
return startShardsAndReroute(allocationService, clusterState, routingNode.shardsWithState(INITIALIZING));
}
@ -218,11 +247,16 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
*
* @return the cluster state after completing the reroute.
*/
public static ClusterState startInitializingShardsAndReroute(AllocationService allocationService,
ClusterState clusterState,
String index) {
return startShardsAndReroute(allocationService, clusterState,
clusterState.routingTable().index(index).shardsWithState(INITIALIZING));
public static ClusterState startInitializingShardsAndReroute(
AllocationService allocationService,
ClusterState clusterState,
String index
) {
return startShardsAndReroute(
allocationService,
clusterState,
clusterState.routingTable().index(index).shardsWithState(INITIALIZING)
);
}
/**
@ -230,9 +264,11 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
*
* @return the cluster state after completing the reroute.
*/
public static ClusterState startShardsAndReroute(AllocationService allocationService,
ClusterState clusterState,
ShardRouting... initializingShards) {
public static ClusterState startShardsAndReroute(
AllocationService allocationService,
ClusterState clusterState,
ShardRouting... initializingShards
) {
return startShardsAndReroute(allocationService, clusterState, Arrays.asList(initializingShards));
}
@ -241,9 +277,11 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
*
* @return the cluster state after completing the reroute.
*/
public static ClusterState startShardsAndReroute(AllocationService allocationService,
ClusterState clusterState,
List<ShardRouting> initializingShards) {
public static ClusterState startShardsAndReroute(
AllocationService allocationService,
ClusterState clusterState,
List<ShardRouting> initializingShards
) {
return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting");
}
@ -271,9 +309,13 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
private volatile long nanoTimeOverride = -1L;
public MockAllocationService(AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService,
SnapshotsInfoService snapshotsInfoService) {
public MockAllocationService(
AllocationDeciders allocationDeciders,
GatewayAllocator gatewayAllocator,
ShardsAllocator shardsAllocator,
ClusterInfoService clusterInfoService,
SnapshotsInfoService snapshotsInfoService
) {
super(allocationDeciders, gatewayAllocator, shardsAllocator, clusterInfoService, snapshotsInfoService);
}
@ -314,8 +356,11 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
}
@Override
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
public void allocateUnassigned(
ShardRouting shardRouting,
RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler
) {
if (shardRouting.primary() || shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
return;
}

View File

@ -73,19 +73,19 @@ public abstract class OpenSearchAllocationWithConstraintsTestCase extends OpenSe
allocation = createAllocationService(sb.build());
}
public Metadata buildMetadata(Metadata.Builder mb, String indexPrefix, int numberOfIndices, int numberOfShards,
int numberOfReplicas) {
public Metadata buildMetadata(Metadata.Builder mb, String indexPrefix, int numberOfIndices, int numberOfShards, int numberOfReplicas) {
for (int i = 0; i < numberOfIndices; i++) {
mb.put(IndexMetadata.builder(indexPrefix + i)
.settings(settings(Version.CURRENT)
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0"))
.numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas));
mb.put(
IndexMetadata.builder(indexPrefix + i)
.settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0"))
.numberOfShards(numberOfShards)
.numberOfReplicas(numberOfReplicas)
);
}
return mb.build();
}
public RoutingTable buildRoutingTable(RoutingTable.Builder rb, Metadata metadata, String indexPrefix,
int numberOfIndices) {
public RoutingTable buildRoutingTable(RoutingTable.Builder rb, Metadata metadata, String indexPrefix, int numberOfIndices) {
for (int i = 0; i < numberOfIndices; i++) {
rb.addAsNew(metadata.index(indexPrefix + i));
}
@ -127,8 +127,7 @@ public abstract class OpenSearchAllocationWithConstraintsTestCase extends OpenSe
Metadata metadata = buildMetadata(Metadata.builder(), DEFAULT_INDEX_PREFIX, indices, shards, replicas);
RoutingTable routingTable = buildRoutingTable(RoutingTable.builder(), metadata, DEFAULT_INDEX_PREFIX, indices);
DiscoveryNodes nodes = addNodes(DiscoveryNodes.builder(), DEFAULT_NODE_PREFIX, nodeCount);
initialClusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable)
.nodes(nodes).build();
initialClusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).nodes(nodes).build();
buildAllocationService();
initialClusterState = allocateShardsAndBalance(initialClusterState);
@ -191,8 +190,7 @@ public abstract class OpenSearchAllocationWithConstraintsTestCase extends OpenSe
int movesToBalance = 0;
do {
assert movesToBalance <= MAX_REROUTE_STEPS_ALLOWED : "Could not balance cluster in max allowed moves";
clusterState = allocation.applyStartedShards(clusterState,
clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
clusterState = allocation.reroute(clusterState, "reroute");
initShards = clusterState.getRoutingNodes().shardsWithState(INITIALIZING);
@ -279,12 +277,10 @@ public abstract class OpenSearchAllocationWithConstraintsTestCase extends OpenSe
public ClusterState allocateShardsAndBalance(ClusterState clusterState) {
int iterations = 0;
do {
clusterState = allocation.applyStartedShards(clusterState,
clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
clusterState = allocation.reroute(clusterState, "reroute");
iterations++;
} while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()
&& iterations < MAX_REROUTE_STEPS_ALLOWED);
} while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() && iterations < MAX_REROUTE_STEPS_ALLOWED);
return clusterState;
}
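
allocateShardsAndBalance above is a converge-with-cap loop: keep applying a step until a quiescence predicate holds or a safety cap trips. A generic sketch of that shape (names illustrative):

import java.util.function.Predicate;
import java.util.function.UnaryOperator;

final class Converge {
    // Apply step until quiescent holds or maxSteps is reached, mirroring
    // the applyStartedShards/reroute loop above.
    static <S> S untilStable(S state, UnaryOperator<S> step, Predicate<S> quiescent, int maxSteps) {
        int steps = 0;
        do {
            state = step.apply(state);
            steps++;
        } while (!quiescent.test(state) && steps < maxSteps);
        return state;
    }
}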
@ -306,10 +302,8 @@ public abstract class OpenSearchAllocationWithConstraintsTestCase extends OpenSe
public void addNodesWithIndexing(int nodeCount, String node_prefix, int indices, int shards, int replicas) {
final String NEW_INDEX_PREFIX = "new_index_";
Metadata md = buildMetadata(Metadata.builder(clusterState.getMetadata()), NEW_INDEX_PREFIX, indices, shards,
replicas);
RoutingTable rb = buildRoutingTable(RoutingTable.builder(clusterState.getRoutingTable()), md, NEW_INDEX_PREFIX,
indices);
Metadata md = buildMetadata(Metadata.builder(clusterState.getMetadata()), NEW_INDEX_PREFIX, indices, shards, replicas);
RoutingTable rb = buildRoutingTable(RoutingTable.builder(clusterState.getRoutingTable()), md, NEW_INDEX_PREFIX, indices);
DiscoveryNodes nodes = addNodes(DiscoveryNodes.builder(clusterState.nodes()), node_prefix, nodeCount);
clusterState = ClusterState.builder(clusterState).metadata(md).routingTable(rb).nodes(nodes).build();
for (int i = 0; i < indices; i++) {
@ -318,10 +312,8 @@ public abstract class OpenSearchAllocationWithConstraintsTestCase extends OpenSe
}
public void addIndices(String index_prefix, int indices, int shards, int replicas) {
Metadata md = buildMetadata(Metadata.builder(clusterState.getMetadata()), index_prefix, indices, shards,
replicas);
RoutingTable rb = buildRoutingTable(RoutingTable.builder(clusterState.getRoutingTable()), md, index_prefix,
indices);
Metadata md = buildMetadata(Metadata.builder(clusterState.getMetadata()), index_prefix, indices, shards, replicas);
RoutingTable rb = buildRoutingTable(RoutingTable.builder(clusterState.getRoutingTable()), md, index_prefix, indices);
clusterState = ClusterState.builder(clusterState).metadata(md).routingTable(rb).build();
if (indexShardCount == null) {

View File

@ -230,28 +230,29 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
// The time it takes to complete an election
public static final long DEFAULT_ELECTION_DELAY
// Pinging all peers twice should be enough to discover all nodes
// Pinging all peers twice should be enough to discover all nodes
= defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2
// Then wait for an election to be scheduled; we allow enough time for retries to allow for collisions
+ defaultMillis(ELECTION_INITIAL_TIMEOUT_SETTING) * ELECTION_RETRIES
+ defaultMillis(ELECTION_BACK_OFF_TIME_SETTING) * ELECTION_RETRIES * (ELECTION_RETRIES - 1) / 2
+ defaultMillis(ELECTION_DURATION_SETTING) * ELECTION_RETRIES
// Allow two round-trip for pre-voting and voting
+ 4 * DEFAULT_DELAY_VARIABILITY
// Then a commit of the new leader's first cluster state
+ DEFAULT_CLUSTER_STATE_UPDATE_DELAY;
// Then wait for an election to be scheduled; we allow enough time for retries to allow for collisions
+ defaultMillis(ELECTION_INITIAL_TIMEOUT_SETTING) * ELECTION_RETRIES + defaultMillis(ELECTION_BACK_OFF_TIME_SETTING)
* ELECTION_RETRIES * (ELECTION_RETRIES - 1) / 2 + defaultMillis(ELECTION_DURATION_SETTING) * ELECTION_RETRIES
// Allow two round-trip for pre-voting and voting
+ 4 * DEFAULT_DELAY_VARIABILITY
// Then a commit of the new leader's first cluster state
+ DEFAULT_CLUSTER_STATE_UPDATE_DELAY;
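
Read in closed form (with $R$ = ELECTION_RETRIES and each $t$ the corresponding setting's default), the constant above is

$$T_{election} = 2\,t_{findPeers} + R\,t_{initTimeout} + \tfrac{R(R-1)}{2}\,t_{backOff} + R\,t_{duration} + 4\,t_{delayVar} + t_{stateUpdate},$$

where the triangular $R(R-1)/2$ term is the sum of the linearly growing back-offs $0, t_{backOff}, 2\,t_{backOff}, \dots, (R-1)\,t_{backOff}$ across the retries.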
public static final long DEFAULT_STABILISATION_TIME =
// If leader just blackholed, need to wait for this to be detected
(defaultMillis(LEADER_CHECK_INTERVAL_SETTING) + defaultMillis(LEADER_CHECK_TIMEOUT_SETTING))
* defaultInt(LEADER_CHECK_RETRY_COUNT_SETTING)
(defaultMillis(LEADER_CHECK_INTERVAL_SETTING) + defaultMillis(LEADER_CHECK_TIMEOUT_SETTING)) * defaultInt(
LEADER_CHECK_RETRY_COUNT_SETTING
)
// then wait for a follower to be promoted to leader
+ DEFAULT_ELECTION_DELAY
// perhaps there is an election collision requiring another publication (which times out) and a term bump
+ defaultMillis(PUBLISH_TIMEOUT_SETTING) + DEFAULT_ELECTION_DELAY
// then wait for the new leader to notice that the old leader is unresponsive
+ (defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING))
* defaultInt(FOLLOWER_CHECK_RETRY_COUNT_SETTING)
+ (defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING)) * defaultInt(
FOLLOWER_CHECK_RETRY_COUNT_SETTING
)
// then wait for the new leader to commit a state without the old leader
+ DEFAULT_CLUSTER_STATE_UPDATE_DELAY;
@ -262,14 +263,16 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final List<ClusterNode> clusterNodes;
final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(
Settings.builder().put(NODE_NAME_SETTING.getKey(), "deterministic-task-queue").build(), random());
Settings.builder().put(NODE_NAME_SETTING.getKey(), "deterministic-task-queue").build(),
random()
);
private boolean disruptStorage;
final VotingConfiguration initialConfiguration;
private final Set<String> disconnectedNodes = new HashSet<>();
private final Set<String> blackholedNodes = new HashSet<>();
private final Set<Tuple<String,String>> blackholedConnections = new HashSet<>();
private final Set<Tuple<String, String>> blackholedConnections = new HashSet<>();
private final Map<Long, ClusterState> committedStatesByVersion = new HashMap<>();
private final LinearizabilityChecker linearizabilityChecker = new LinearizabilityChecker();
private final History history = new History();
@ -292,8 +295,8 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
this.nodeHealthService = nodeHealthService;
bigArrays = usually()
? BigArrays.NON_RECYCLING_INSTANCE
: new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
? BigArrays.NON_RECYCLING_INSTANCE
: new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
deterministicTaskQueue.setExecutionDelayVariabilityMillis(DEFAULT_DELAY_VARIABILITY);
assertThat(initialNodeCount, greaterThan(0));
@ -301,19 +304,28 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final Set<String> masterEligibleNodeIds = new HashSet<>(initialNodeCount);
clusterNodes = new ArrayList<>(initialNodeCount);
for (int i = 0; i < initialNodeCount; i++) {
final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(),
allNodesMasterEligible || i == 0 || randomBoolean(), nodeSettings, nodeHealthService);
final ClusterNode clusterNode = new ClusterNode(
nextNodeIndex.getAndIncrement(),
allNodesMasterEligible || i == 0 || randomBoolean(),
nodeSettings,
nodeHealthService
);
clusterNodes.add(clusterNode);
if (clusterNode.getLocalNode().isMasterNode()) {
masterEligibleNodeIds.add(clusterNode.getId());
}
}
initialConfiguration = new VotingConfiguration(new HashSet<>(
randomSubsetOf(randomIntBetween(1, masterEligibleNodeIds.size()), masterEligibleNodeIds)));
initialConfiguration = new VotingConfiguration(
new HashSet<>(randomSubsetOf(randomIntBetween(1, masterEligibleNodeIds.size()), masterEligibleNodeIds))
);
logger.info("--> creating cluster of {} nodes (master-eligible nodes: {}) with initial configuration {}",
initialNodeCount, masterEligibleNodeIds, initialConfiguration);
logger.info(
"--> creating cluster of {} nodes (master-eligible nodes: {}) with initial configuration {}",
initialNodeCount,
masterEligibleNodeIds,
initialConfiguration
);
}
void addNodesAndStabilise(int newNodesCount) {
@ -330,7 +342,8 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
+ DEFAULT_DELAY_VARIABILITY
// Commit a new cluster state with the new node(s). Might be split into multiple commits, and each might need a
// followup reconfiguration
+ newNodesCount * 2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY);
+ newNodesCount * 2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY
);
}
List<ClusterNode> addNodes(int newNodesCount) {
@ -338,8 +351,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final List<ClusterNode> addedNodes = new ArrayList<>();
for (int i = 0; i < newNodesCount; i++) {
final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true, Settings.EMPTY,
nodeHealthService);
final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true, Settings.EMPTY, nodeHealthService);
addedNodes.add(clusterNode);
}
clusterNodes.addAll(addedNodes);
@ -394,8 +406,12 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
deterministicTaskQueue.setExecutionDelayVariabilityMillis(DEFAULT_DELAY_VARIABILITY);
logger.debug("----> [runRandomly {}] reducing delay variability and running until [{}ms]", step, finishTime);
} else {
logger.debug("----> [runRandomly {}] running until [{}ms] with delay variability of [{}ms]", step, finishTime,
deterministicTaskQueue.getExecutionDelayVariabilityMillis());
logger.debug(
"----> [runRandomly {}] running until [{}ms] with delay variability of [{}ms]",
step,
finishTime,
deterministicTaskQueue.getExecutionDelayVariabilityMillis()
);
}
finishTime = deterministicTaskQueue.getLatestDeferredExecutionTime();
}
@ -406,45 +422,49 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final int key = randomIntBetween(0, keyRange);
final int newValue = randomInt();
clusterNode.onNode(() -> {
logger.debug("----> [runRandomly {}] proposing new value [{}] to [{}]",
thisStep, newValue, clusterNode.getId());
logger.debug(
"----> [runRandomly {}] proposing new value [{}] to [{}]",
thisStep,
newValue,
clusterNode.getId()
);
clusterNode.submitValue(key, newValue);
}).run();
} else if (finishTime == -1 && randomBoolean() && randomBoolean() && randomBoolean()) {
final ClusterNode clusterNode = getAnyNodePreferringLeaders();
final int key = randomIntBetween(0, keyRange);
clusterNode.onNode(() -> {
logger.debug("----> [runRandomly {}] reading value from [{}]",
thisStep, clusterNode.getId());
logger.debug("----> [runRandomly {}] reading value from [{}]", thisStep, clusterNode.getId());
clusterNode.readValue(key);
}).run();
} else if (rarely()) {
final ClusterNode clusterNode = getAnyNodePreferringLeaders();
final boolean autoShrinkVotingConfiguration = randomBoolean();
clusterNode.onNode(
() -> {
logger.debug("----> [runRandomly {}] setting auto-shrink configuration to {} on {}",
thisStep, autoShrinkVotingConfiguration, clusterNode.getId());
clusterNode.submitSetAutoShrinkVotingConfiguration(autoShrinkVotingConfiguration);
}).run();
clusterNode.onNode(() -> {
logger.debug(
"----> [runRandomly {}] setting auto-shrink configuration to {} on {}",
thisStep,
autoShrinkVotingConfiguration,
clusterNode.getId()
);
clusterNode.submitSetAutoShrinkVotingConfiguration(autoShrinkVotingConfiguration);
}).run();
} else if (allowReboots && rarely()) {
// reboot random node
final ClusterNode clusterNode = getAnyNode();
logger.debug("----> [runRandomly {}] rebooting [{}]", thisStep, clusterNode.getId());
clusterNode.close();
clusterNodes.forEach(
cn -> deterministicTaskQueue.scheduleNow(cn.onNode(
new Runnable() {
@Override
public void run() {
cn.transportService.disconnectFromNode(clusterNode.getLocalNode());
}
clusterNodes.forEach(cn -> deterministicTaskQueue.scheduleNow(cn.onNode(new Runnable() {
@Override
public void run() {
cn.transportService.disconnectFromNode(clusterNode.getLocalNode());
}
@Override
public String toString() {
return "disconnect from " + clusterNode.getLocalNode() + " after shutdown";
}
})));
@Override
public String toString() {
return "disconnect from " + clusterNode.getLocalNode() + " after shutdown";
}
})));
clusterNodes.replaceAll(cn -> cn == clusterNode ? cn.restartedNode() : cn);
} else if (rarely()) {
final ClusterNode clusterNode = getAnyNode();
@ -513,8 +533,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
if (storedState == null) {
committedStatesByVersion.put(applierState.getVersion(), applierState);
} else {
assertEquals("expected " + applierState + " but got " + storedState,
value(applierState), value(storedState));
assertEquals("expected " + applierState + " but got " + storedState, value(applierState), value(storedState));
}
}
}
@ -524,8 +543,11 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
void stabilise(long stabilisationDurationMillis) {
assertThat("stabilisation requires default delay variability (and proper cleanup of raised variability)",
deterministicTaskQueue.getExecutionDelayVariabilityMillis(), lessThanOrEqualTo(DEFAULT_DELAY_VARIABILITY));
assertThat(
"stabilisation requires default delay variability (and proper cleanup of raised variability)",
deterministicTaskQueue.getExecutionDelayVariabilityMillis(),
lessThanOrEqualTo(DEFAULT_DELAY_VARIABILITY)
);
assertFalse("stabilisation requires stable storage", disruptStorage);
bootstrapIfNecessary();
@ -543,10 +565,16 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet());
assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId));
assertThat(leaderId + " has no NO_MASTER_BLOCK",
leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false));
assertThat(leaderId + " has no STATE_NOT_RECOVERED_BLOCK",
leader.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false));
assertThat(
leaderId + " has no NO_MASTER_BLOCK",
leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(false)
);
assertThat(
leaderId + " has no STATE_NOT_RECOVERED_BLOCK",
leader.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK),
equalTo(false)
);
assertThat(leaderId + " has applied its state ", leader.getLastAppliedClusterState().getVersion(), isEqualToLeaderVersion);
for (final ClusterNode clusterNode : clusterNodes) {
@ -562,51 +590,90 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
if (isConnectedPair(leader, clusterNode)) {
assertThat(nodeId + " is a follower of " + leaderId, clusterNode.coordinator.getMode(), is(FOLLOWER));
assertThat(nodeId + " has the same term as " + leaderId, clusterNode.coordinator.getCurrentTerm(), is(leaderTerm));
assertFalse(nodeId + " is not a missing vote for " + leaderId,
leader.coordinator.missingJoinVoteFrom(clusterNode.getLocalNode()));
assertThat(nodeId + " has the same accepted state as " + leaderId,
clusterNode.coordinator.getLastAcceptedState().getVersion(), isEqualToLeaderVersion);
assertFalse(
nodeId + " is not a missing vote for " + leaderId,
leader.coordinator.missingJoinVoteFrom(clusterNode.getLocalNode())
);
assertThat(
nodeId + " has the same accepted state as " + leaderId,
clusterNode.coordinator.getLastAcceptedState().getVersion(),
isEqualToLeaderVersion
);
if (clusterNode.getClusterStateApplyResponse() == ClusterStateApplyResponse.SUCCEED) {
assertThat(nodeId + " has the same applied state as " + leaderId,
clusterNode.getLastAppliedClusterState().getVersion(), isEqualToLeaderVersion);
assertTrue(nodeId + " is in its own latest applied state",
clusterNode.getLastAppliedClusterState().getNodes().nodeExists(nodeId));
assertThat(
nodeId + " has the same applied state as " + leaderId,
clusterNode.getLastAppliedClusterState().getVersion(),
isEqualToLeaderVersion
);
assertTrue(
nodeId + " is in its own latest applied state",
clusterNode.getLastAppliedClusterState().getNodes().nodeExists(nodeId)
);
}
assertTrue(nodeId + " is in the latest applied state on " + leaderId,
leader.getLastAppliedClusterState().getNodes().nodeExists(nodeId));
assertTrue(
nodeId + " is in the latest applied state on " + leaderId,
leader.getLastAppliedClusterState().getNodes().nodeExists(nodeId)
);
assertTrue(nodeId + " has been bootstrapped", clusterNode.coordinator.isInitialConfigurationSet());
assertThat(nodeId + " has correct master", clusterNode.getLastAppliedClusterState().nodes().getMasterNode(),
equalTo(leader.getLocalNode()));
assertThat(nodeId + " has no NO_MASTER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false));
assertThat(nodeId + " has no STATE_NOT_RECOVERED_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false));
assertThat(
nodeId + " has correct master",
clusterNode.getLastAppliedClusterState().nodes().getMasterNode(),
equalTo(leader.getLocalNode())
);
assertThat(
nodeId + " has no NO_MASTER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(false)
);
assertThat(
nodeId + " has no STATE_NOT_RECOVERED_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK),
equalTo(false)
);
} else {
assertThat(nodeId + " is not following " + leaderId, clusterNode.coordinator.getMode(), is(CANDIDATE));
assertThat(nodeId + " has no master", clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), nullValue());
assertThat(nodeId + " has NO_MASTER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(true));
assertFalse(nodeId + " is not in the applied state on " + leaderId,
leader.getLastAppliedClusterState().getNodes().nodeExists(nodeId));
assertThat(
nodeId + " has NO_MASTER_BLOCK",
clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID),
equalTo(true)
);
assertFalse(
nodeId + " is not in the applied state on " + leaderId,
leader.getLastAppliedClusterState().getNodes().nodeExists(nodeId)
);
}
}
final Set<String> connectedNodeIds
= clusterNodes.stream().filter(n -> isConnectedPair(leader, n)).map(ClusterNode::getId).collect(Collectors.toSet());
final Set<String> connectedNodeIds = clusterNodes.stream()
.filter(n -> isConnectedPair(leader, n))
.map(ClusterNode::getId)
.collect(Collectors.toSet());
assertThat(leader.getLastAppliedClusterState().getNodes().getSize(), equalTo(connectedNodeIds.size()));
final ClusterState lastAcceptedState = leader.coordinator.getLastAcceptedState();
final VotingConfiguration lastCommittedConfiguration = lastAcceptedState.getLastCommittedConfiguration();
assertTrue(connectedNodeIds + " should be a quorum of " + lastCommittedConfiguration,
lastCommittedConfiguration.hasQuorum(connectedNodeIds));
assertThat("leader " + leader.getLocalNode() + " should be part of voting configuration " + lastCommittedConfiguration,
lastCommittedConfiguration.getNodeIds(), Matchers.hasItem(leader.getLocalNode().getId()));
assertTrue(
connectedNodeIds + " should be a quorum of " + lastCommittedConfiguration,
lastCommittedConfiguration.hasQuorum(connectedNodeIds)
);
assertThat(
"leader " + leader.getLocalNode() + " should be part of voting configuration " + lastCommittedConfiguration,
lastCommittedConfiguration.getNodeIds(),
Matchers.hasItem(leader.getLocalNode().getId())
);
assertThat("no reconfiguration is in progress",
lastAcceptedState.getLastCommittedConfiguration(), equalTo(lastAcceptedState.getLastAcceptedConfiguration()));
assertThat("current configuration is already optimal",
leader.improveConfiguration(lastAcceptedState), sameInstance(lastAcceptedState));
assertThat(
"no reconfiguration is in progress",
lastAcceptedState.getLastCommittedConfiguration(),
equalTo(lastAcceptedState.getLastAcceptedConfiguration())
);
assertThat(
"current configuration is already optimal",
leader.improveConfiguration(lastAcceptedState),
sameInstance(lastAcceptedState)
);
logger.info("checking linearizability of history with size {}: {}", history.size(), history);
final AtomicBoolean abort = new AtomicBoolean();
@ -671,10 +738,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
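// Two nodes count as a connected pair only if each can reach the other and both report a HEALTHY status.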
private boolean isConnectedPair(ClusterNode n1, ClusterNode n2) {
return n1 == n2 ||
(getConnectionStatus(n1.getLocalNode(), n2.getLocalNode()) == ConnectionStatus.CONNECTED
&& getConnectionStatus(n2.getLocalNode(), n1.getLocalNode()) == ConnectionStatus.CONNECTED) &&
(n1.nodeHealthService.getHealth().getStatus() == HEALTHY && n2.nodeHealthService.getHealth().getStatus() == HEALTHY);
return n1 == n2
|| (getConnectionStatus(n1.getLocalNode(), n2.getLocalNode()) == ConnectionStatus.CONNECTED
&& getConnectionStatus(n2.getLocalNode(), n1.getLocalNode()) == ConnectionStatus.CONNECTED)
&& (n1.nodeHealthService.getHealth().getStatus() == HEALTHY && n2.nodeHealthService.getHealth().getStatus() == HEALTHY);
}
ClusterNode getAnyLeader() {
@ -683,8 +750,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
return randomFrom(allLeaders);
}
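// Link state handed out (most of the time) for nodes unknown to the cluster, fixed once per test run.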
private final ConnectionStatus preferredUnknownNodeConnectionStatus =
randomFrom(ConnectionStatus.DISCONNECTED, ConnectionStatus.BLACK_HOLE);
private final ConnectionStatus preferredUnknownNodeConnectionStatus = randomFrom(
ConnectionStatus.DISCONNECTED,
ConnectionStatus.BLACK_HOLE
);
private ConnectionStatus getConnectionStatus(DiscoveryNode sender, DiscoveryNode destination) {
ConnectionStatus connectionStatus;
@ -697,8 +766,9 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
} else if (nodeExists(sender) && nodeExists(destination)) {
connectionStatus = ConnectionStatus.CONNECTED;
} else {
connectionStatus = usually() ? preferredUnknownNodeConnectionStatus :
randomFrom(ConnectionStatus.DISCONNECTED, ConnectionStatus.BLACK_HOLE);
connectionStatus = usually()
? preferredUnknownNodeConnectionStatus
: randomFrom(ConnectionStatus.DISCONNECTED, ConnectionStatus.BLACK_HOLE);
}
return connectionStatus;
}
@ -708,9 +778,12 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
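// Picks a random master-eligible node whose id appears in the initial voting configuration.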
ClusterNode getAnyBootstrappableNode() {
return randomFrom(clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode())
.filter(n -> initialConfiguration.getNodeIds().contains(n.getLocalNode().getId()))
.collect(Collectors.toList()));
return randomFrom(
clusterNodes.stream()
.filter(n -> n.getLocalNode().isMasterNode())
.filter(n -> initialConfiguration.getNodeIds().contains(n.getLocalNode().getId()))
.collect(Collectors.toList())
);
}
ClusterNode getAnyNode() {
@ -725,8 +798,9 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
List<ClusterNode> getAllNodesExcept(ClusterNode... clusterNodes) {
Set<String> forbiddenIds = Arrays.stream(clusterNodes).map(ClusterNode::getId).collect(Collectors.toSet());
List<ClusterNode> acceptableNodes
= this.clusterNodes.stream().filter(n -> forbiddenIds.contains(n.getId()) == false).collect(Collectors.toList());
List<ClusterNode> acceptableNodes = this.clusterNodes.stream()
.filter(n -> forbiddenIds.contains(n.getId()) == false)
.collect(Collectors.toList());
return acceptableNodes;
}
@ -771,31 +845,43 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
delegate = gatewayMetaState.getPersistedState();
} else {
nodeEnvironment = null;
delegate = new InMemoryPersistedState(0L,
delegate = new InMemoryPersistedState(
0L,
ClusterStateUpdaters.addStateNotRecoveredBlock(
clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG,
VotingConfiguration.EMPTY_CONFIG, 0L)));
clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L)
)
);
}
} catch (IOException e) {
throw new UncheckedIOException("Unable to create MockPersistedState", e);
}
}
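// Restart variant: rebuilds the persisted state for a new local node instance, letting tests adapt the stored metadata and current term.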
MockPersistedState(DiscoveryNode newLocalNode, MockPersistedState oldState,
Function<Metadata, Metadata> adaptGlobalMetadata, Function<Long, Long> adaptCurrentTerm) {
MockPersistedState(
DiscoveryNode newLocalNode,
MockPersistedState oldState,
Function<Metadata, Metadata> adaptGlobalMetadata,
Function<Long, Long> adaptCurrentTerm
) {
try {
if (oldState.nodeEnvironment != null) {
nodeEnvironment = oldState.nodeEnvironment;
final Metadata updatedMetadata = adaptGlobalMetadata.apply(oldState.getLastAcceptedState().metadata());
final long updatedTerm = adaptCurrentTerm.apply(oldState.getCurrentTerm());
if (updatedMetadata != oldState.getLastAcceptedState().metadata() || updatedTerm != oldState.getCurrentTerm()) {
try (PersistedClusterStateService.Writer writer =
new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), bigArrays,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
deterministicTaskQueue::getCurrentTimeMillis)
.createWriter()) {
writer.writeFullStateAndCommit(updatedTerm,
ClusterState.builder(oldState.getLastAcceptedState()).metadata(updatedMetadata).build());
try (
PersistedClusterStateService.Writer writer = new PersistedClusterStateService(
nodeEnvironment,
xContentRegistry(),
bigArrays,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
deterministicTaskQueue::getCurrentTimeMillis
).createWriter()
) {
writer.writeFullStateAndCommit(
updatedTerm,
ClusterState.builder(oldState.getLastAcceptedState()).metadata(updatedMetadata).build()
);
}
}
final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(newLocalNode, bigArrays);
@ -809,10 +895,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final long persistedCurrentTerm;
if ( // node is master-ineligible either before or after the restart ...
(oldState.getLastAcceptedState().nodes().getLocalNode().isMasterNode() && newLocalNode.isMasterNode()) == false
// ... and it's accepted some non-initial state so we can roll back ...
&& (oldState.getLastAcceptedState().term() > 0L || oldState.getLastAcceptedState().version() > 0L)
// ... and we're feeling lucky ...
&& randomBoolean()) {
// ... then we might not have reliably persisted the cluster state, so emulate a rollback
@ -829,21 +915,37 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
newLastAcceptedVersion = randomNonNegativeLong();
} else {
newLastAcceptedTerm = randomLongBetween(0L, Math.min(persistedCurrentTerm, lastAcceptedTerm));
newLastAcceptedVersion = randomLongBetween(0L,
newLastAcceptedTerm == lastAcceptedTerm ? lastAcceptedVersion - 1 : Long.MAX_VALUE);
newLastAcceptedVersion = randomLongBetween(
0L,
newLastAcceptedTerm == lastAcceptedTerm ? lastAcceptedVersion - 1 : Long.MAX_VALUE
);
}
final VotingConfiguration newVotingConfiguration
= new VotingConfiguration(randomBoolean() ? emptySet() : singleton(randomAlphaOfLength(10)));
final VotingConfiguration newVotingConfiguration = new VotingConfiguration(
randomBoolean() ? emptySet() : singleton(randomAlphaOfLength(10))
);
final long newValue = randomLong();
logger.trace("rolling back persisted cluster state on master-ineligible node [{}]: " +
"previously currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={} " +
"but now currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={}", newLocalNode,
oldState.getCurrentTerm(), lastAcceptedTerm, lastAcceptedVersion,
persistedCurrentTerm, newLastAcceptedTerm, newLastAcceptedVersion);
logger.trace(
"rolling back persisted cluster state on master-ineligible node [{}]: "
+ "previously currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={} "
+ "but now currentTerm={}, lastAcceptedTerm={}, lastAcceptedVersion={}",
newLocalNode,
oldState.getCurrentTerm(),
lastAcceptedTerm,
lastAcceptedVersion,
persistedCurrentTerm,
newLastAcceptedTerm,
newLastAcceptedVersion
);
clusterState(newLastAcceptedTerm, newLastAcceptedVersion, newLocalNode, newVotingConfiguration,
newVotingConfiguration, newValue).writeTo(outStream);
clusterState(
newLastAcceptedTerm,
newLastAcceptedVersion,
newLocalNode,
newVotingConfiguration,
newVotingConfiguration,
newValue
).writeTo(outStream);
} else {
persistedCurrentTerm = oldState.getCurrentTerm();
final Metadata updatedMetadata = adaptGlobalMetadata.apply(oldState.getLastAcceptedState().metadata());
@ -854,11 +956,15 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
}
StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(),
new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
StreamInput inStream = new NamedWriteableAwareStreamInput(
outStream.bytes().streamInput(),
new NamedWriteableRegistry(ClusterModule.getNamedWriteables())
);
// adapt cluster state to new localNode instance and add blocks
delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(persistedCurrentTerm),
ClusterStateUpdaters.addStateNotRecoveredBlock(ClusterState.readFrom(inStream, newLocalNode)));
delegate = new InMemoryPersistedState(
adaptCurrentTerm.apply(persistedCurrentTerm),
ClusterStateUpdaters.addStateNotRecoveredBlock(ClusterState.readFrom(inStream, newLocalNode))
);
}
} catch (IOException e) {
throw new UncheckedIOException("Unable to create MockPersistedState", e);
@ -923,14 +1029,23 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
private NodeHealthService nodeHealthService;
List<BiConsumer<DiscoveryNode, ClusterState>> extraJoinValidators = new ArrayList<>();
ClusterNode(int nodeIndex, boolean masterEligible, Settings nodeSettings, NodeHealthService nodeHealthService) {
this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier, nodeSettings,
nodeHealthService);
this(
nodeIndex,
createDiscoveryNode(nodeIndex, masterEligible),
defaultPersistedStateSupplier,
nodeSettings,
nodeHealthService
);
}
ClusterNode(int nodeIndex, DiscoveryNode localNode, Function<DiscoveryNode, MockPersistedState> persistedStateSupplier,
Settings nodeSettings, NodeHealthService nodeHealthService) {
ClusterNode(
int nodeIndex,
DiscoveryNode localNode,
Function<DiscoveryNode, MockPersistedState> persistedStateSupplier,
Settings nodeSettings,
NodeHealthService nodeHealthService
) {
this.nodeHealthService = nodeHealthService;
this.nodeIndex = nodeIndex;
this.localNode = localNode;
@ -963,34 +1078,77 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
@Override
protected Optional<DisruptableMockTransport> getDisruptableMockTransport(TransportAddress address) {
return clusterNodes.stream().map(cn -> cn.mockTransport)
.filter(transport -> transport.getLocalNode().getAddress().equals(address)).findAny();
return clusterNodes.stream()
.map(cn -> cn.mockTransport)
.filter(transport -> transport.getLocalNode().getAddress().equals(address))
.findAny();
}
};
final Settings settings = nodeSettings.hasValue(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()) ?
nodeSettings : Settings.builder().put(nodeSettings)
.putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(),
ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap
transportService = mockTransport.createTransportService(settings, threadPool,
getTransportInterceptor(localNode, threadPool), a -> localNode, null, emptySet());
masterService = new AckedFakeThreadPoolMasterService(localNode.getId(), "test", threadPool,
runnable -> deterministicTaskQueue.scheduleNow(onNode(runnable)));
final Settings settings = nodeSettings.hasValue(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey())
? nodeSettings
: Settings.builder()
.put(nodeSettings)
.putList(
ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(),
ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)
)
.build(); // suppress auto-bootstrap
transportService = mockTransport.createTransportService(
settings,
threadPool,
getTransportInterceptor(localNode, threadPool),
a -> localNode,
null,
emptySet()
);
masterService = new AckedFakeThreadPoolMasterService(
localNode.getId(),
"test",
threadPool,
runnable -> deterministicTaskQueue.scheduleNow(onNode(runnable))
);
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
clusterApplierService = new DisruptableClusterApplierService(localNode.getId(), settings, clusterSettings,
deterministicTaskQueue, threadPool);
clusterApplierService = new DisruptableClusterApplierService(
localNode.getId(),
settings,
clusterSettings,
deterministicTaskQueue,
threadPool
);
clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService);
clusterService.setNodeConnectionsService(
new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService));
final Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators =
Collections.singletonList((dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs)));
new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService)
);
final Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators = Collections.singletonList(
(dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs))
);
final AllocationService allocationService = OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY);
coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(),
allocationService, masterService, this::getPersistedState,
Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get(), (s, p, r) -> {},
getElectionStrategy(), nodeHealthService);
coordinator = new Coordinator(
"test_node",
settings,
clusterSettings,
transportService,
writableRegistry(),
allocationService,
masterService,
this::getPersistedState,
Cluster.this::provideSeedHosts,
clusterApplierService,
onJoinValidators,
Randomness.get(),
(s, p, r) -> {},
getElectionStrategy(),
nodeHealthService
);
masterService.setClusterStatePublisher(coordinator);
final GatewayService gatewayService
= new GatewayService(settings, allocationService, clusterService, threadPool, null, coordinator);
final GatewayService gatewayService = new GatewayService(
settings,
allocationService,
clusterService,
threadPool,
null,
coordinator
);
logger.trace("starting up [{}]", localNode);
transportService.start();
@ -1007,10 +1165,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
logger.trace("closing");
coordinator.stop();
clusterService.stop();
//transportService.stop(); // does blocking stuff :/
// transportService.stop(); // does blocking stuff :/
clusterService.close();
coordinator.close();
//transportService.close(); // does blocking stuff :/
// transportService.close(); // does blocking stuff :/
}).run();
}
@ -1018,17 +1176,30 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
return restartedNode(Function.identity(), Function.identity(), nodeSettings);
}
ClusterNode restartedNode(Function<Metadata, Metadata> adaptGlobalMetadata, Function<Long, Long> adaptCurrentTerm,
Settings nodeSettings) {
ClusterNode restartedNode(
Function<Metadata, Metadata> adaptGlobalMetadata,
Function<Long, Long> adaptCurrentTerm,
Settings nodeSettings
) {
final TransportAddress address = randomBoolean() ? buildNewFakeTransportAddress() : localNode.getAddress();
final DiscoveryNode newLocalNode = new DiscoveryNode(localNode.getName(), localNode.getId(),
final DiscoveryNode newLocalNode = new DiscoveryNode(
localNode.getName(),
localNode.getId(),
UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests
address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(),
localNode.isMasterNode() && DiscoveryNode.isMasterNode(nodeSettings)
? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(), Version.CURRENT);
return new ClusterNode(nodeIndex, newLocalNode,
node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetadata, adaptCurrentTerm), nodeSettings,
nodeHealthService);
address.address().getHostString(),
address.getAddress(),
address,
Collections.emptyMap(),
localNode.isMasterNode() && DiscoveryNode.isMasterNode(nodeSettings) ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(),
Version.CURRENT
);
return new ClusterNode(
nodeIndex,
newLocalNode,
node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetadata, adaptCurrentTerm),
nodeSettings,
nodeHealthService
);
}
private CoordinationState.PersistedState getPersistedState() {
@ -1085,15 +1256,22 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
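// Toggles the auto-shrink-voting-configuration persistent setting through an update task with a no-op listener.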
void submitSetAutoShrinkVotingConfiguration(final boolean autoShrinkVotingConfiguration) {
submitUpdateTask("set master nodes failure tolerance [" + autoShrinkVotingConfiguration + "]", cs ->
ClusterState.builder(cs).metadata(
Metadata.builder(cs.metadata())
.persistentSettings(Settings.builder()
.put(cs.metadata().persistentSettings())
.put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration)
.build())
.build())
.build(), (source, e) -> {});
submitUpdateTask(
"set master nodes failure tolerance [" + autoShrinkVotingConfiguration + "]",
cs -> ClusterState.builder(cs)
.metadata(
Metadata.builder(cs.metadata())
.persistentSettings(
Settings.builder()
.put(cs.metadata().persistentSettings())
.put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration)
.build()
)
.build()
)
.build(),
(source, e) -> {}
);
}
AckCollector submitValue(final long value) {
@ -1140,43 +1318,45 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
});
}
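// Submits a cluster state update on this node; the returned AckCollector records per-node acks for the resulting publication.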
AckCollector submitUpdateTask(String source, UnaryOperator<ClusterState> clusterStateUpdate,
ClusterStateTaskListener taskListener) {
AckCollector submitUpdateTask(
String source,
UnaryOperator<ClusterState> clusterStateUpdate,
ClusterStateTaskListener taskListener
) {
final AckCollector ackCollector = new AckCollector();
onNode(() -> {
logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source);
final long submittedTerm = coordinator.getCurrentTerm();
masterService.submitStateUpdateTask(source,
new ClusterStateUpdateTask() {
masterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
assertThat(currentState.term(), greaterThanOrEqualTo(submittedTerm));
masterService.nextAckCollector = ackCollector;
return clusterStateUpdate.apply(currentState);
}
@Override
public void onFailure(String source, Exception e) {
logger.debug(() -> new ParameterizedMessage("failed to publish: [{}]", source), e);
taskListener.onFailure(source, e);
}
@Override
public void onNoLongerMaster(String source) {
logger.trace("no longer master: [{}]", source);
taskListener.onNoLongerMaster(source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
updateCommittedStates();
ClusterState state = committedStatesByVersion.get(newState.version());
assertNotNull("State not committed : " + newState.toString(), state);
assertStateEquals(state, newState);
logger.trace("successfully published: [{}]", newState);
taskListener.clusterStateProcessed(source, oldState, newState);
}
});
}).run();
return ackCollector;
}
@ -1222,7 +1402,8 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
.limit((Math.max(initialConfiguration.getNodeIds().size(), 2) - 1) / 2)
.forEach(nodeIdsWithPlaceholders::add);
final Set<String> nodeIds = new HashSet<>(
randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders));
randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders)
);
// initial configuration should not have a placeholder for the local node
if (initialConfiguration.getNodeIds().contains(localNode.getId()) && nodeIds.contains(localNode.getId()) == false) {
nodeIds.remove(nodeIds.iterator().next());
@ -1233,8 +1414,10 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
coordinator.setInitialConfiguration(configurationWithPlaceholders);
logger.info("successfully set initial configuration to {}", configurationWithPlaceholders);
} catch (CoordinationStateRejectedException e) {
logger.info(new ParameterizedMessage("failed to set initial configuration to {}",
configurationWithPlaceholders), e);
logger.info(
new ParameterizedMessage("failed to set initial configuration to {}", configurationWithPlaceholders),
e
);
}
}).run();
}
@ -1249,7 +1432,8 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
private List<TransportAddress> provideSeedHosts(SeedHostsProvider.HostsResolver ignored) {
return seedHostsList != null ? seedHostsList
return seedHostsList != null
? seedHostsList
: clusterNodes.stream().map(ClusterNode::getLocalNode).map(DiscoveryNode::getAddress).collect(Collectors.toList());
}
}
@ -1292,8 +1476,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
private final List<DiscoveryNode> unsuccessfulNodes = new ArrayList<>();
@Override
public void onCommit(TimeValue commitTime) {
}
public void onCommit(TimeValue commitTime) {}
@Override
public void onNodeAck(DiscoveryNode node, Exception e) {
@ -1327,8 +1510,12 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
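// Fake master service that funnels acks for the next published cluster state into nextAckCollector.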
AckCollector nextAckCollector = new AckCollector();
AckedFakeThreadPoolMasterService(String nodeName, String serviceName, ThreadPool threadPool,
Consumer<Runnable> onTaskAvailableToRun) {
AckedFakeThreadPoolMasterService(
String nodeName,
String serviceName,
ThreadPool threadPool,
Consumer<Runnable> onTaskAvailableToRun
) {
super(nodeName, serviceName, threadPool, onTaskAvailableToRun);
}
@ -1358,8 +1545,13 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
ClusterStateApplyResponse clusterStateApplyResponse = ClusterStateApplyResponse.SUCCEED;
private boolean applicationMayFail;
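// Applier service whose reaction to each new cluster state (apply it or simulate a failure) is scripted via clusterStateApplyResponse.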
DisruptableClusterApplierService(String nodeName, Settings settings, ClusterSettings clusterSettings,
DeterministicTaskQueue deterministicTaskQueue, ThreadPool threadPool) {
DisruptableClusterApplierService(
String nodeName,
Settings settings,
ClusterSettings clusterSettings,
DeterministicTaskQueue deterministicTaskQueue,
ThreadPool threadPool
) {
super(nodeName, settings, clusterSettings, threadPool);
this.nodeName = nodeName;
this.deterministicTaskQueue = deterministicTaskQueue;
@ -1370,7 +1562,9 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
final ClusterState oldClusterState = event.previousState();
final ClusterState newClusterState = event.state();
assert oldClusterState.version() <= newClusterState.version() : "updating cluster state from version "
+ oldClusterState.version() + " to stale version " + newClusterState.version();
+ oldClusterState.version()
+ " to stale version "
+ newClusterState.version();
break;
case FAIL:
throw new OpenSearchException("simulated cluster state applier failure");
@ -1414,10 +1608,17 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
protected DiscoveryNode createDiscoveryNode(int nodeIndex, boolean masterEligible) {
final TransportAddress address = buildNewFakeTransportAddress();
return new DiscoveryNode("", "node" + nodeIndex,
return new DiscoveryNode(
"",
"node" + nodeIndex,
UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests
address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(),
masterEligible ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(), Version.CURRENT);
address.address().getHostString(),
address.getAddress(),
address,
Collections.emptyMap(),
masterEligible ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(),
Version.CURRENT
);
}
/**
@ -1441,13 +1642,14 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
public ClusterState setValue(ClusterState clusterState, int key, long value) {
return ClusterState.builder(clusterState).metadata(
Metadata.builder(clusterState.metadata())
.persistentSettings(Settings.builder()
.put(clusterState.metadata().persistentSettings())
.put("value_" + key, value)
.build())
.build())
return ClusterState.builder(clusterState)
.metadata(
Metadata.builder(clusterState.metadata())
.persistentSettings(
Settings.builder().put(clusterState.metadata().persistentSettings()).put("value_" + key, value).build()
)
.build()
)
.build();
}
@ -1469,8 +1671,13 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
public Set<Integer> keySet(ClusterState clusterState) {
return clusterState.metadata().persistentSettings().keySet().stream()
.filter(s -> s.startsWith("value_")).map(s -> Integer.valueOf(s.substring("value_".length()))).collect(Collectors.toSet());
return clusterState.metadata()
.persistentSettings()
.keySet()
.stream()
.filter(s -> s.startsWith("value_"))
.map(s -> Integer.valueOf(s.substring("value_".length())))
.collect(Collectors.toSet());
}
/**
View File
@ -62,38 +62,60 @@ import static org.junit.Assert.assertThat;
public class CoordinationStateTestCluster {
public static ClusterState clusterState(long term, long version, DiscoveryNode localNode,
CoordinationMetadata.VotingConfiguration lastCommittedConfig,
CoordinationMetadata.VotingConfiguration lastAcceptedConfig, long value) {
return clusterState(term, version, DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build(),
lastCommittedConfig, lastAcceptedConfig, value);
public static ClusterState clusterState(
long term,
long version,
DiscoveryNode localNode,
CoordinationMetadata.VotingConfiguration lastCommittedConfig,
CoordinationMetadata.VotingConfiguration lastAcceptedConfig,
long value
) {
return clusterState(
term,
version,
DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build(),
lastCommittedConfig,
lastAcceptedConfig,
value
);
}
public static ClusterState clusterState(long term, long version, DiscoveryNodes discoveryNodes,
CoordinationMetadata.VotingConfiguration lastCommittedConfig,
CoordinationMetadata.VotingConfiguration lastAcceptedConfig, long value) {
return setValue(ClusterState.builder(ClusterName.DEFAULT)
.version(version)
.nodes(discoveryNodes)
.metadata(Metadata.builder()
.clusterUUID(UUIDs.randomBase64UUID(random())) // generate cluster UUID deterministically for repeatable tests
.coordinationMetadata(CoordinationMetadata.builder()
.term(term)
.lastCommittedConfiguration(lastCommittedConfig)
.lastAcceptedConfiguration(lastAcceptedConfig)
.build()))
.stateUUID(UUIDs.randomBase64UUID(random())) // generate cluster state UUID deterministically for repeatable tests
.build(), value);
public static ClusterState clusterState(
long term,
long version,
DiscoveryNodes discoveryNodes,
CoordinationMetadata.VotingConfiguration lastCommittedConfig,
CoordinationMetadata.VotingConfiguration lastAcceptedConfig,
long value
) {
return setValue(
ClusterState.builder(ClusterName.DEFAULT)
.version(version)
.nodes(discoveryNodes)
.metadata(
Metadata.builder()
.clusterUUID(UUIDs.randomBase64UUID(random())) // generate cluster UUID deterministically for repeatable tests
.coordinationMetadata(
CoordinationMetadata.builder()
.term(term)
.lastCommittedConfiguration(lastCommittedConfig)
.lastAcceptedConfiguration(lastAcceptedConfig)
.build()
)
)
.stateUUID(UUIDs.randomBase64UUID(random())) // generate cluster state UUID deterministically for repeatable tests
.build(),
value
);
}
public static ClusterState setValue(ClusterState clusterState, long value) {
return ClusterState.builder(clusterState).metadata(
Metadata.builder(clusterState.metadata())
.persistentSettings(Settings.builder()
.put(clusterState.metadata().persistentSettings())
.put("value", value)
.build())
.build())
return ClusterState.builder(clusterState)
.metadata(
Metadata.builder(clusterState.metadata())
.persistentSettings(Settings.builder().put(clusterState.metadata().persistentSettings()).put("value", value).build())
.build()
)
.build();
}
@ -110,9 +132,17 @@ public class CoordinationStateTestCluster {
ClusterNode(DiscoveryNode localNode, ElectionStrategy electionStrategy) {
this.localNode = localNode;
persistedState = new InMemoryPersistedState(0L,
clusterState(0L, 0L, localNode, CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG,
CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG, 0L));
persistedState = new InMemoryPersistedState(
0L,
clusterState(
0L,
0L,
localNode,
CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG,
CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG,
0L
)
);
this.electionStrategy = electionStrategy;
state = new CoordinationState(localNode, persistedState, electionStrategy);
}
@ -121,12 +151,15 @@ public class CoordinationStateTestCluster {
if (localNode.isMasterNode() == false && rarely()) {
// master-ineligible nodes can't be trusted to persist the cluster state properly, but will not lose the fact that they
// were bootstrapped
final CoordinationMetadata.VotingConfiguration votingConfiguration
= persistedState.getLastAcceptedState().getLastAcceptedConfiguration().isEmpty()
final CoordinationMetadata.VotingConfiguration votingConfiguration = persistedState.getLastAcceptedState()
.getLastAcceptedConfiguration()
.isEmpty()
? CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG
: CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_MASTER;
persistedState
= new InMemoryPersistedState(0L, clusterState(0L, 0L, localNode, votingConfiguration, votingConfiguration, 0L));
persistedState = new InMemoryPersistedState(
0L,
clusterState(0L, 0L, localNode, votingConfiguration, votingConfiguration, 0L)
);
}
final Set<DiscoveryNodeRole> roles = new HashSet<>(localNode.getRoles());
@ -138,20 +171,32 @@ public class CoordinationStateTestCluster {
}
}
localNode = new DiscoveryNode(localNode.getName(), localNode.getId(), UUIDs.randomBase64UUID(random()),
localNode.getHostName(), localNode.getHostAddress(), localNode.getAddress(), localNode.getAttributes(),
roles, localNode.getVersion());
localNode = new DiscoveryNode(
localNode.getName(),
localNode.getId(),
UUIDs.randomBase64UUID(random()),
localNode.getHostName(),
localNode.getHostAddress(),
localNode.getAddress(),
localNode.getAttributes(),
roles,
localNode.getVersion()
);
state = new CoordinationState(localNode, persistedState, electionStrategy);
}
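// Seeds this node with the initial voting configuration and value, emulating cluster bootstrapping.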
void setInitialState(CoordinationMetadata.VotingConfiguration initialConfig, long initialValue) {
final ClusterState.Builder builder = ClusterState.builder(state.getLastAcceptedState());
builder.metadata(Metadata.builder()
.coordinationMetadata(CoordinationMetadata.builder()
.lastAcceptedConfiguration(initialConfig)
.lastCommittedConfiguration(initialConfig)
.build()));
builder.metadata(
Metadata.builder()
.coordinationMetadata(
CoordinationMetadata.builder()
.lastAcceptedConfiguration(initialConfig)
.lastCommittedConfiguration(initialConfig)
.build()
)
);
state.setInitialState(setValue(builder.build(), initialValue));
}
}
@ -166,9 +211,7 @@ public class CoordinationStateTestCluster {
this.electionStrategy = electionStrategy;
messages = new ArrayList<>();
clusterNodes = nodes.stream()
.map(node -> new ClusterNode(node, electionStrategy))
.collect(Collectors.toList());
clusterNodes = nodes.stream().map(node -> new ClusterNode(node, electionStrategy)).collect(Collectors.toList());
initialConfiguration = randomVotingConfig();
initialValue = randomLong();
@ -200,8 +243,8 @@ public class CoordinationStateTestCluster {
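// Chooses a random non-empty subset of the current nodes as a voting configuration.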
CoordinationMetadata.VotingConfiguration randomVotingConfig() {
return new CoordinationMetadata.VotingConfiguration(
randomSubsetOf(randomIntBetween(1, clusterNodes.size()), clusterNodes).stream()
.map(cn -> cn.localNode.getId()).collect(toSet()));
randomSubsetOf(randomIntBetween(1, clusterNodes.size()), clusterNodes).stream().map(cn -> cn.localNode.getId()).collect(toSet())
);
}
void applyMessage(Message message) {
@ -242,17 +285,26 @@ public class CoordinationStateTestCluster {
} else if (rarely() && rarely()) {
randomFrom(clusterNodes).reboot();
} else if (rarely()) {
final List<ClusterNode> masterNodes = clusterNodes.stream().filter(cn -> cn.state.electionWon())
final List<ClusterNode> masterNodes = clusterNodes.stream()
.filter(cn -> cn.state.electionWon())
.collect(Collectors.toList());
if (masterNodes.isEmpty() == false) {
final ClusterNode clusterNode = randomFrom(masterNodes);
final long term = rarely() ? randomLongBetween(0, maxTerm + 1) : clusterNode.state.getCurrentTerm();
final long version = rarely() ? randomIntBetween(0, 5) : clusterNode.state.getLastPublishedVersion() + 1;
final CoordinationMetadata.VotingConfiguration acceptedConfig = rarely() ? randomVotingConfig() :
clusterNode.state.getLastAcceptedConfiguration();
final CoordinationMetadata.VotingConfiguration acceptedConfig = rarely()
? randomVotingConfig()
: clusterNode.state.getLastAcceptedConfiguration();
final PublishRequest publishRequest = clusterNode.state.handleClientValue(
clusterState(term, version, clusterNode.localNode, clusterNode.state.getLastCommittedConfiguration(),
acceptedConfig, randomLong()));
clusterState(
term,
version,
clusterNode.localNode,
clusterNode.state.getLastCommittedConfiguration(),
acceptedConfig,
randomLong()
)
);
broadcast(clusterNode.localNode, publishRequest);
}
} else if (messages.isEmpty() == false) {
@ -272,7 +324,8 @@ public class CoordinationStateTestCluster {
void invariant() {
// one master per term
messages.stream().filter(m -> m.payload instanceof PublishRequest)
messages.stream()
.filter(m -> m.payload instanceof PublishRequest)
.collect(Collectors.groupingBy(m -> ((PublishRequest) m.payload).getAcceptedState().term()))
.forEach((term, publishMessages) -> {
Set<DiscoveryNode> mastersForTerm = publishMessages.stream().collect(Collectors.groupingBy(m -> m.sourceNode)).keySet();
@ -280,25 +333,31 @@ public class CoordinationStateTestCluster {
});
// unique cluster state per (term, version) pair
messages.stream().filter(m -> m.payload instanceof PublishRequest)
messages.stream()
.filter(m -> m.payload instanceof PublishRequest)
.map(m -> ((PublishRequest) m.payload).getAcceptedState())
.collect(Collectors.groupingBy(ClusterState::term))
.forEach((term, clusterStates) -> {
clusterStates.stream().collect(Collectors.groupingBy(ClusterState::version))
.forEach((version, clusterStates1) -> {
Set<String> clusterStateUUIDsForTermAndVersion = clusterStates1.stream().collect(Collectors.groupingBy(
ClusterState::stateUUID
)).keySet();
assertThat("Multiple cluster states " + clusterStates1 + " for term " + term + " and version " + version,
clusterStateUUIDsForTermAndVersion, hasSize(1));
clusterStates.stream().collect(Collectors.groupingBy(ClusterState::version)).forEach((version, clusterStates1) -> {
Set<String> clusterStateUUIDsForTermAndVersion = clusterStates1.stream()
.collect(Collectors.groupingBy(ClusterState::stateUUID))
.keySet();
assertThat(
"Multiple cluster states " + clusterStates1 + " for term " + term + " and version " + version,
clusterStateUUIDsForTermAndVersion,
hasSize(1)
);
Set<Long> clusterStateValuesForTermAndVersion = clusterStates1.stream().collect(Collectors.groupingBy(
CoordinationStateTestCluster::value
)).keySet();
Set<Long> clusterStateValuesForTermAndVersion = clusterStates1.stream()
.collect(Collectors.groupingBy(CoordinationStateTestCluster::value))
.keySet();
assertThat("Multiple cluster states " + clusterStates1 + " for term " + term + " and version " + version,
clusterStateValuesForTermAndVersion, hasSize(1));
assertThat(
"Multiple cluster states " + clusterStates1 + " for term " + term + " and version " + version,
clusterStateValuesForTermAndVersion,
hasSize(1)
);
});
});
}
}
View File
@ -514,10 +514,7 @@ public class DeterministicTaskQueue {
@Override
public String toString() {
return "DeferredTask{" +
"executionTimeMillis=" + executionTimeMillis +
", task=" + task +
'}';
return "DeferredTask{" + "executionTimeMillis=" + executionTimeMillis + ", task=" + task + '}';
}
}
}
View File
@ -188,6 +188,7 @@ public class LinearizabilityChecker {
public List<Event> copyEvents() {
return new ArrayList<>(events);
}
/**
* Completes the history with response events for invocations that are missing corresponding responses
*
@ -224,10 +225,7 @@ public class LinearizabilityChecker {
@Override
public String toString() {
return "History{" +
"events=" + events +
", nextId=" + nextId +
'}';
return "History{" + "events=" + events + ", nextId=" + nextId + '}';
}
}
@ -253,8 +251,12 @@ public class LinearizabilityChecker {
* @param terminateEarly a condition upon which to terminate early
* @return true iff the history is linearizable w.r.t. the given spec
*/
public boolean isLinearizable(SequentialSpec spec, History history, Function<Object, Object> missingResponseGenerator,
BooleanSupplier terminateEarly) {
public boolean isLinearizable(
SequentialSpec spec,
History history,
Function<Object, Object> missingResponseGenerator,
BooleanSupplier terminateEarly
) {
history = history.clone(); // clone history before completing it
history.complete(missingResponseGenerator); // complete history
final Collection<List<Event>> partitions = spec.partition(history.copyEvents());
@ -313,9 +315,7 @@ public class LinearizabilityChecker {
* Convenience method for {@link #isLinearizable(SequentialSpec, History, Function)} that requires the history to be complete
*/
public boolean isLinearizable(SequentialSpec spec, History history) {
return isLinearizable(spec, history, o -> {
throw new IllegalArgumentException("history is not complete");
});
return isLinearizable(spec, history, o -> { throw new IllegalArgumentException("history is not complete"); });
}
/**
@ -328,9 +328,10 @@ public class LinearizabilityChecker {
StringBuilder builder = new StringBuilder();
partitions.forEach(new Consumer<List<Event>>() {
int index = 0;
@Override
public void accept(List<Event> events) {
builder.append("Partition " ).append(index++).append("\n");
builder.append("Partition ").append(index++).append("\n");
builder.append(visualizePartition(events));
}
});
@ -361,9 +362,14 @@ public class LinearizabilityChecker {
int beginIndex = eventToPosition.get(Tuple.tuple(EventType.INVOCATION, id));
int endIndex = eventToPosition.get(Tuple.tuple(EventType.RESPONSE, id));
input = input.substring(0, Math.min(beginIndex + 25, input.length()));
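// Renders one operation: left-padded input, a run of 'X' spanning invocation to response, then the output and event id.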
return Strings.padStart(input, beginIndex + 25, ' ') +
" " + Strings.padStart("", endIndex-beginIndex, 'X') + " "
+ output + " (" + entry.event.id + ")";
return Strings.padStart(input, beginIndex + 25, ' ')
+ " "
+ Strings.padStart("", endIndex - beginIndex, 'X')
+ " "
+ output
+ " ("
+ entry.event.id
+ ")";
}
/**
@ -431,11 +437,7 @@ public class LinearizabilityChecker {
@Override
public String toString() {
return "Event{" +
"type=" + type +
", value=" + value +
", id=" + id +
'}';
return "Event{" + "type=" + type + ", value=" + value + ", id=" + id + '}';
}
}
@ -473,7 +475,6 @@ public class LinearizabilityChecker {
}
}
/**
* A cache optimized for small bit-counts (less than 64) and small number of unique permutations of state objects.
*
@ -511,10 +512,8 @@ public class LinearizabilityChecker {
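// Takes a compact path keyed by a single long when the bitset fits in one word, otherwise the general large-bitset path.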
private boolean addInternal(Object state, FixedBitSet bitSet) {
long[] bits = bitSet.getBits();
if (bits.length == 1)
return addSmall(state, bits[0]);
else
return addLarge(state, bitSet);
if (bits.length == 1) return addSmall(state, bits[0]);
else return addLarge(state, bitSet);
}
private boolean addSmall(Object state, long bits) {
@ -524,8 +523,7 @@ public class LinearizabilityChecker {
states = Collections.singleton(state);
} else {
Set<Object> oldStates = smallMap.indexGet(index);
if (oldStates.contains(state))
return false;
if (oldStates.contains(state)) return false;
states = new HashSet<>(oldStates.size() + 1);
states.addAll(oldStates);
states.add(state);
View File
@ -44,28 +44,26 @@ import java.util.concurrent.TimeUnit;
public class MockSinglePrioritizingExecutor extends PrioritizedOpenSearchThreadPoolExecutor {
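// Worker "threads" never actually start: Thread#start is overridden to run the task on the DeterministicTaskQueue instead.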
public MockSinglePrioritizingExecutor(String name, DeterministicTaskQueue deterministicTaskQueue, ThreadPool threadPool) {
super(name, 0, 1, 0L, TimeUnit.MILLISECONDS,
r -> new Thread() {
super(name, 0, 1, 0L, TimeUnit.MILLISECONDS, r -> new Thread() {
@Override
public void start() {
deterministicTaskQueue.scheduleNow(new Runnable() {
@Override
public void run() {
try {
r.run();
} catch (KillWorkerError kwe) {
// hacks everywhere
}
}
@Override
public String toString() {
return r.toString();
}
});
}
}, threadPool.getThreadContext(), threadPool.scheduler());
}
@Override
View File
@ -32,11 +32,7 @@
package org.opensearch.cluster.routing;
import org.opensearch.cluster.routing.RecoverySource;
import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.UnassignedInfo;
/**
* A helper class that allows access to package private APIs for testing.
@ -64,9 +60,17 @@ public class ShardRoutingHelper {
}
public static ShardRouting initWithSameId(ShardRouting copy, RecoverySource recoverySource) {
return new ShardRouting(copy.shardId(), copy.currentNodeId(), copy.relocatingNodeId(),
copy.primary(), ShardRoutingState.INITIALIZING, recoverySource, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null),
copy.allocationId(), copy.getExpectedShardSize());
return new ShardRouting(
copy.shardId(),
copy.currentNodeId(),
copy.relocatingNodeId(),
copy.primary(),
ShardRoutingState.INITIALIZING,
recoverySource,
new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null),
copy.allocationId(),
copy.getExpectedShardSize()
);
}
public static ShardRouting moveToUnassigned(ShardRouting routing, UnassignedInfo info) {
@ -74,7 +78,16 @@ public class ShardRoutingHelper {
}
public static ShardRouting newWithRestoreSource(ShardRouting routing, SnapshotRecoverySource recoverySource) {
return new ShardRouting(routing.shardId(), routing.currentNodeId(), routing.relocatingNodeId(), routing.primary(), routing.state(),
recoverySource, routing.unassignedInfo(), routing.allocationId(), routing.getExpectedShardSize());
return new ShardRouting(
routing.shardId(),
routing.currentNodeId(),
routing.relocatingNodeId(),
routing.primary(),
routing.state(),
recoverySource,
routing.unassignedInfo(),
routing.allocationId(),
routing.getExpectedShardSize()
);
}
}
View File
@ -55,52 +55,154 @@ public class TestShardRouting {
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) {
return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state),
buildUnassignedInfo(state), buildAllocationId(state), -1);
return new ShardRouting(
shardId,
currentNodeId,
null,
primary,
state,
buildRecoveryTarget(primary, state),
buildUnassignedInfo(state),
buildAllocationId(state),
-1
);
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary,
ShardRoutingState state, RecoverySource recoverySource) {
return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource,
buildUnassignedInfo(state), buildAllocationId(state), -1);
public static ShardRouting newShardRouting(
ShardId shardId,
String currentNodeId,
boolean primary,
ShardRoutingState state,
RecoverySource recoverySource
) {
return new ShardRouting(
shardId,
currentNodeId,
null,
primary,
state,
recoverySource,
buildUnassignedInfo(state),
buildAllocationId(state),
-1
);
}
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId,
boolean primary, ShardRoutingState state) {
return newShardRouting(new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId), currentNodeId,
relocatingNodeId, primary, state);
public static ShardRouting newShardRouting(
String index,
int shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state
) {
return newShardRouting(
new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId),
currentNodeId,
relocatingNodeId,
primary,
state
);
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId,
boolean primary, ShardRoutingState state) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state,
buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1);
public static ShardRouting newShardRouting(
ShardId shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state
) {
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
primary,
state,
buildRecoveryTarget(primary, state),
buildUnassignedInfo(state),
buildAllocationId(state),
-1
);
}
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId) {
return newShardRouting(new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId), currentNodeId,
relocatingNodeId, primary, state, allocationId);
public static ShardRouting newShardRouting(
String index,
int shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state,
AllocationId allocationId
) {
return newShardRouting(
new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId),
currentNodeId,
relocatingNodeId,
primary,
state,
allocationId
);
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary,
ShardRoutingState state, AllocationId allocationId) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state,
buildRecoveryTarget(primary, state), buildUnassignedInfo(state), allocationId, -1);
public static ShardRouting newShardRouting(
ShardId shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state,
AllocationId allocationId
) {
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
primary,
state,
buildRecoveryTarget(primary, state),
buildUnassignedInfo(state),
allocationId,
-1
);
}
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
return newShardRouting(new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId,
primary, state, unassignedInfo);
public static ShardRouting newShardRouting(
String index,
int shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state,
UnassignedInfo unassignedInfo
) {
return newShardRouting(
new ShardId(index, IndexMetadata.INDEX_UUID_NA_VALUE, shardId),
currentNodeId,
relocatingNodeId,
primary,
state,
unassignedInfo
);
}
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state),
unassignedInfo, buildAllocationId(state), -1);
public static ShardRouting newShardRouting(
ShardId shardId,
String currentNodeId,
String relocatingNodeId,
boolean primary,
ShardRoutingState state,
UnassignedInfo unassignedInfo
) {
return new ShardRouting(
shardId,
currentNodeId,
relocatingNodeId,
primary,
state,
buildRecoveryTarget(primary, state),
unassignedInfo,
buildAllocationId(state),
-1
);
}
public static ShardRouting relocate(ShardRouting shardRouting, String relocatingNodeId, long expectedShardSize) {
@ -112,8 +214,10 @@ public class TestShardRouting {
case UNASSIGNED:
case INITIALIZING:
if (primary) {
return OpenSearchTestCase.randomFrom(RecoverySource.EmptyStoreRecoverySource.INSTANCE,
RecoverySource.ExistingStoreRecoverySource.INSTANCE);
return OpenSearchTestCase.randomFrom(
RecoverySource.EmptyStoreRecoverySource.INSTANCE,
RecoverySource.ExistingStoreRecoverySource.INSTANCE
);
} else {
return RecoverySource.PeerRecoverySource.INSTANCE;
}
@ -154,7 +258,8 @@ public class TestShardRouting {
}
public static RecoverySource randomRecoverySource() {
return OpenSearchTestCase.randomFrom(RecoverySource.EmptyStoreRecoverySource.INSTANCE,
return OpenSearchTestCase.randomFrom(
RecoverySource.EmptyStoreRecoverySource.INSTANCE,
RecoverySource.ExistingStoreRecoverySource.INSTANCE,
RecoverySource.PeerRecoverySource.INSTANCE,
RecoverySource.LocalShardsRecoverySource.INSTANCE,
@ -162,6 +267,8 @@ public class TestShardRouting {
UUIDs.randomBase64UUID(),
new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())),
Version.CURRENT,
new IndexId("some_index", UUIDs.randomBase64UUID(random()))));
new IndexId("some_index", UUIDs.randomBase64UUID(random()))
)
);
}
}
View File
@ -65,18 +65,33 @@ public class FakeThreadPoolMasterService extends MasterService {
private boolean taskInProgress = false;
private boolean waitForPublish = false;
public FakeThreadPoolMasterService(String nodeName, String serviceName, ThreadPool threadPool,
Consumer<Runnable> onTaskAvailableToRun) {
super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool);
public FakeThreadPoolMasterService(
String nodeName,
String serviceName,
ThreadPool threadPool,
Consumer<Runnable> onTaskAvailableToRun
) {
super(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool
);
this.name = serviceName;
this.onTaskAvailableToRun = onTaskAvailableToRun;
}
@Override
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new PrioritizedOpenSearchThreadPoolExecutor(name, 1, 1, 1, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory(name),
null, null) {
return new PrioritizedOpenSearchThreadPoolExecutor(
name,
1,
1,
1,
TimeUnit.SECONDS,
OpenSearchExecutors.daemonThreadFactory(name),
null,
null
) {
@Override
public void execute(Runnable command, final TimeValue timeout, final Runnable timeoutCallback) {
@ -179,9 +194,15 @@ public class FakeThreadPoolMasterService extends MasterService {
@Override
public String toString() {
return "publish change of cluster state from version [" + clusterChangedEvent.previousState().version() + "] in term [" +
clusterChangedEvent.previousState().term() + "] to version [" + clusterChangedEvent.state().version()
+ "] in term [" + clusterChangedEvent.state().term() + "]";
return "publish change of cluster state from version ["
+ clusterChangedEvent.previousState().version()
+ "] in term ["
+ clusterChangedEvent.previousState().term()
+ "] to version ["
+ clusterChangedEvent.state().version()
+ "] in term ["
+ clusterChangedEvent.state().term()
+ "]";
}
}));
}
View File
@ -32,9 +32,6 @@
package org.opensearch.common.breaker;
import org.opensearch.common.breaker.CircuitBreakingException;
import org.opensearch.common.breaker.NoopCircuitBreaker;
import java.util.concurrent.atomic.AtomicBoolean;
public class TestCircuitBreaker extends NoopCircuitBreaker {
View File
@ -81,7 +81,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
}
public void testLength() throws IOException {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)};
int[] sizes = { 0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3) };
for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = newBytesReference(sizes[i]);
@ -90,14 +90,14 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
}
public void testSlice() throws IOException {
for (int length : new int[] {0, 1, randomIntBetween(2, PAGE_SIZE), randomIntBetween(PAGE_SIZE + 1, 3 * PAGE_SIZE)}) {
for (int length : new int[] { 0, 1, randomIntBetween(2, PAGE_SIZE), randomIntBetween(PAGE_SIZE + 1, 3 * PAGE_SIZE) }) {
BytesReference pbr = newBytesReference(length);
int sliceOffset = randomIntBetween(0, length / 2);
int sliceLength = Math.max(0, length - sliceOffset - 1);
BytesReference slice = pbr.slice(sliceOffset, sliceLength);
assertEquals(sliceLength, slice.length());
for (int i = 0; i < sliceLength; i++) {
assertEquals(pbr.get(i+sliceOffset), slice.get(i));
assertEquals(pbr.get(i + sliceOffset), slice.get(i));
}
BytesRef singlePageOrNull = getSinglePageOrNull(slice);
if (singlePageOrNull != null) {
@ -154,8 +154,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
// try to read more than the stream contains
si.reset();
expectThrows(IndexOutOfBoundsException.class, () ->
si.readBytes(targetBuf, 0, length * 2));
expectThrows(IndexOutOfBoundsException.class, () -> si.readBytes(targetBuf, 0, length * 2));
}
public void testStreamInputMarkAndReset() throws IOException {
@ -165,7 +164,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
assertNotNull(si);
StreamInput wrap = StreamInput.wrap(BytesReference.toBytes(pbr));
while(wrap.available() > 0) {
while (wrap.available() > 0) {
if (rarely()) {
wrap.mark(Integer.MAX_VALUE);
si.mark(Integer.MAX_VALUE);
@ -214,7 +213,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
switch (randomIntBetween(0, 10)) {
case 6:
case 5:
target.append(new BytesRef(new byte[]{streamInput.readByte()}));
target.append(new BytesRef(new byte[] { streamInput.readByte() }));
break;
case 4:
case 3:
@ -296,16 +295,16 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
final int iters = randomIntBetween(5, 50);
for (int i = 0; i < iters; i++) {
try (StreamInput input = pbr.streamInput()) {
final int offset = randomIntBetween(0, length-1);
final int offset = randomIntBetween(0, length - 1);
assertEquals(offset, input.skip(offset));
assertEquals(pbr.get(offset), input.readByte());
if (offset == length - 1) {
continue; // no more bytes to retrieve!
}
final int nextOffset = randomIntBetween(offset, length-2);
final int nextOffset = randomIntBetween(offset, length - 2);
assertEquals(nextOffset - offset, input.skip(nextOffset - offset));
assertEquals(pbr.get(nextOffset+1), input.readByte()); // +1 for the one byte we read above
assertEquals(length - (nextOffset+2), input.skip(Long.MAX_VALUE));
assertEquals(pbr.get(nextOffset + 1), input.readByte()); // +1 for the one byte we read above
assertEquals(length - (nextOffset + 2), input.skip(Long.MAX_VALUE));
assertEquals(0, input.skip(randomIntBetween(0, Integer.MAX_VALUE)));
}
}
@ -325,12 +324,12 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
}
public void testToBytes() throws IOException {
int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};
int[] sizes = { 0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5)) };
for (int i = 0; i < sizes.length; i++) {
BytesReference pbr = newBytesReference(sizes[i]);
byte[] bytes = BytesReference.toBytes(pbr);
assertEquals(sizes[i], bytes.length);
for (int j = 0; j < bytes.length; j++) {
for (int j = 0; j < bytes.length; j++) {
assertEquals(bytes[j], pbr.get(j));
}
}
@ -403,7 +402,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
BytesRefIterator iterator = pbr.iterator();
BytesRef ref;
BytesRefBuilder builder = new BytesRefBuilder();
while((ref = iterator.next()) != null) {
while ((ref = iterator.next()) != null) {
builder.append(ref);
}
assertArrayEquals(BytesReference.toBytes(pbr), BytesRef.deepCopyOf(builder.toBytesRef()).bytes);
@ -418,7 +417,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
BytesRefIterator iterator = slice.iterator();
BytesRef ref = null;
BytesRefBuilder builder = new BytesRefBuilder();
while((ref = iterator.next()) != null) {
while ((ref = iterator.next()) != null) {
builder.append(ref);
}
assertArrayEquals(BytesReference.toBytes(slice), BytesRef.deepCopyOf(builder.toBytesRef()).bytes);
@ -439,7 +438,7 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
BytesRefIterator iterator = pbr.iterator();
BytesRef ref = null;
BytesRefBuilder builder = new BytesRefBuilder();
while((ref = iterator.next()) != null) {
while ((ref = iterator.next()) != null) {
builder.append(ref);
}
assertArrayEquals(BytesReference.toBytes(pbr), BytesRef.deepCopyOf(builder.toBytesRef()).bytes);
@ -540,8 +539,8 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
assertEquals(new BytesArray(bytesRef), copy);
int offsetToFlip = randomIntBetween(0, bytesRef.length - 1);
int value = ~Byte.toUnsignedInt(bytesRef.bytes[bytesRef.offset+offsetToFlip]);
bytesRef.bytes[bytesRef.offset+offsetToFlip] = (byte)value;
int value = ~Byte.toUnsignedInt(bytesRef.bytes[bytesRef.offset + offsetToFlip]);
bytesRef.bytes[bytesRef.offset + offsetToFlip] = (byte) value;
assertNotEquals(new BytesArray(bytesRef), copy);
}
@ -577,19 +576,18 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
assertTrue(bytesReference.compareTo(new BytesArray("")) > 0);
assertTrue(new BytesArray("").compareTo(bytesReference) < 0);
assertEquals(0, bytesReference.compareTo(bytesReference));
int sliceFrom = randomIntBetween(0, bytesReference.length());
int sliceLength = randomIntBetween(0, bytesReference.length() - sliceFrom);
BytesReference slice = bytesReference.slice(sliceFrom, sliceLength);
assertEquals(bytesReference.toBytesRef().compareTo(slice.toBytesRef()),
new BytesArray(bytesReference.toBytesRef(), true).compareTo(new BytesArray(slice.toBytesRef(), true)));
assertEquals(
bytesReference.toBytesRef().compareTo(slice.toBytesRef()),
new BytesArray(bytesReference.toBytesRef(), true).compareTo(new BytesArray(slice.toBytesRef(), true))
);
assertEquals(bytesReference.toBytesRef().compareTo(slice.toBytesRef()),
bytesReference.compareTo(slice));
assertEquals(slice.toBytesRef().compareTo(bytesReference.toBytesRef()),
slice.compareTo(bytesReference));
assertEquals(bytesReference.toBytesRef().compareTo(slice.toBytesRef()), bytesReference.compareTo(slice));
assertEquals(slice.toBytesRef().compareTo(bytesReference.toBytesRef()), slice.compareTo(bytesReference));
assertEquals(0, slice.compareTo(new BytesArray(slice.toBytesRef())));
assertEquals(0, new BytesArray(slice.toBytesRef()).compareTo(slice));
@ -607,10 +605,8 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
BytesReference crazyReference = crazyStream.bytes();
assertFalse(crazyReference.compareTo(bytesReference) == 0);
assertEquals(0, crazyReference.slice(offset, length).compareTo(
bytesReference));
assertEquals(0, bytesReference.compareTo(
crazyReference.slice(offset, length)));
assertEquals(0, crazyReference.slice(offset, length).compareTo(bytesReference));
assertEquals(0, bytesReference.compareTo(crazyReference.slice(offset, length)));
}
}
@ -632,16 +628,15 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
int num = 0;
if (ref.length() > 0) {
BytesRefIterator iterator = ref.iterator();
while(iterator.next() != null) {
while (iterator.next() != null) {
num++;
}
}
return num;
}
public void testBasicEquals() {
final int len = randomIntBetween(0, randomBoolean() ? 10: 100000);
final int len = randomIntBetween(0, randomBoolean() ? 10 : 100000);
final int offset1 = randomInt(5);
final byte[] array1 = new byte[offset1 + len + randomInt(5)];
random().nextBytes(array1);
@ -673,8 +668,9 @@ public abstract class AbstractBytesReferenceTestCase extends OpenSearchTestCase
final int count = randomIntBetween(1, 10);
final BytesReference bytesReference = newBytesReference(count * Integer.BYTES);
final BytesRef bytesRef = bytesReference.toBytesRef();
final IntBuffer intBuffer =
ByteBuffer.wrap(bytesRef.bytes, bytesRef.offset, bytesRef.length).order(ByteOrder.BIG_ENDIAN).asIntBuffer();
final IntBuffer intBuffer = ByteBuffer.wrap(bytesRef.bytes, bytesRef.offset, bytesRef.length)
.order(ByteOrder.BIG_ENDIAN)
.asIntBuffer();
for (int i = 0; i < count; ++i) {
assertEquals(intBuffer.get(i), bytesReference.getInt(i * Integer.BYTES));
}

View File

@ -57,8 +57,12 @@ public abstract class ModuleTestCase extends OpenSearchTestCase {
* Like {@link #assertInstanceBinding(Module, Class, Predicate)}, but filters the
* classes checked by the given annotation.
*/
private <T> void assertInstanceBindingWithAnnotation(Module module, Class<T> to,
Predicate<T> tester, Class<? extends Annotation> annotation) {
private <T> void assertInstanceBindingWithAnnotation(
Module module,
Class<T> to,
Predicate<T> tester,
Class<? extends Annotation> annotation
) {
List<Element> elements = Elements.getElements(module);
for (Element element : elements) {
if (element instanceof InstanceBinding) {
@ -69,7 +73,7 @@ public abstract class ModuleTestCase extends OpenSearchTestCase {
return;
}
}
} else if (element instanceof ProviderInstanceBinding) {
} else if (element instanceof ProviderInstanceBinding) {
ProviderInstanceBinding<?> binding = (ProviderInstanceBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
assertTrue(tester.test(to.cast(binding.getProviderInstance().get())));

View File

@ -37,7 +37,6 @@ import org.opensearch.common.xcontent.ObjectParser;
import java.util.List;
/**
* Represents a single log line in a json format.
* Parsing log lines with this class confirms the json format of logs
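As an aside, the ObjectParser named in the hunk context above is the piece that does the field mapping for this class. A minimal, hedged sketch of the pattern — the LogLine POJO and its field names here are hypothetical stand-ins, not the real JsonLogLine fields:

import org.opensearch.common.ParseField;
import org.opensearch.common.xcontent.ObjectParser;

// Hypothetical stand-in for JsonLogLine; field names are assumptions.
class LogLine {
    String level;
    String message;
    void setLevel(String level) { this.level = level; }
    void setMessage(String message) { this.message = message; }
}

class LogLineParserSketch {
    // ObjectParser maps JSON fields to setters declaratively; parsing a line
    // then reduces to PARSER.apply(xContentParser, null).
    static final ObjectParser<LogLine, Void> PARSER = new ObjectParser<>("log_line", LogLine::new);
    static {
        PARSER.declareString(LogLine::setLevel, new ParseField("level"));
        PARSER.declareString(LogLine::setMessage, new ParseField("message"));
    }
}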

View File

@ -84,25 +84,23 @@ public abstract class JsonLogsIntegTestCase extends OpenSearchRestTestCase {
assertNotNull(firstLine);
try (Stream<JsonLogLine> stream = JsonLogsStream.from(openReader(getLogFile()))) {
stream.limit(LINES_TO_CHECK)
.forEach(jsonLogLine -> {
assertThat(jsonLogLine.type(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.timestamp(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.level(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.component(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.message(), is(not(emptyOrNullString())));
stream.limit(LINES_TO_CHECK).forEach(jsonLogLine -> {
assertThat(jsonLogLine.type(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.timestamp(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.level(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.component(), is(not(emptyOrNullString())));
assertThat(jsonLogLine.message(), is(not(emptyOrNullString())));
// all lines should have the same nodeName and clusterName
assertThat(jsonLogLine.nodeName(), nodeNameMatcher());
assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName()));
});
// all lines should have the same nodeName and clusterName
assertThat(jsonLogLine.nodeName(), nodeNameMatcher());
assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName()));
});
}
}
private JsonLogLine findFirstLine() throws IOException {
try (Stream<JsonLogLine> stream = JsonLogsStream.from(openReader(getLogFile()))) {
return stream.findFirst()
.orElseThrow(() -> new AssertionError("no logs at all?!"));
return stream.findFirst().orElseThrow(() -> new AssertionError("no logs at all?!"));
}
}
@ -119,7 +117,7 @@ public abstract class JsonLogsIntegTestCase extends OpenSearchRestTestCase {
}
assertNotNull(firstLine);
//once the nodeId and clusterId are received, they should be the same on remaining lines
// once the nodeId and clusterId are received, they should be the same on remaining lines
int i = 0;
while (iterator.hasNext() && i++ < LINES_TO_CHECK) {
@ -134,9 +132,11 @@ public abstract class JsonLogsIntegTestCase extends OpenSearchRestTestCase {
private Path getLogFile() {
String logFileString = System.getProperty("tests.logfile");
if (logFileString == null) {
fail("tests.logfile must be set to run this test. It is automatically "
+ "set by gradle. If you must set it yourself then it should be the absolute path to the "
+ "log file.");
fail(
"tests.logfile must be set to run this test. It is automatically "
+ "set by gradle. If you must set it yourself then it should be the absolute path to the "
+ "log file."
);
}
return Paths.get(logFileString);
}

View File

@ -60,8 +60,11 @@ public class JsonLogsStream {
private JsonLogsStream(BufferedReader reader) throws IOException {
this.reader = reader;
this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
reader);
this.parser = JsonXContent.jsonXContent.createParser(
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
reader
);
}
public static Stream<JsonLogLine> from(BufferedReader reader) throws IOException {
@ -78,14 +81,12 @@ public class JsonLogsStream {
private Stream<JsonLogLine> stream() {
Spliterator<JsonLogLine> spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED);
return StreamSupport.stream(spliterator, false)
.onClose(this::close);
return StreamSupport.stream(spliterator, false).onClose(this::close);
}
private Stream<Map<String, String>> streamMap() {
Spliterator<Map<String, String>> spliterator = Spliterators.spliteratorUnknownSize(new MapIterator(), Spliterator.ORDERED);
return StreamSupport.stream(spliterator, false)
.onClose(this::close);
return StreamSupport.stream(spliterator, false).onClose(this::close);
}
private void close() {
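The spliterator/onClose pairing reformatted above is a generic recipe for exposing an Iterator as a resource-aware Stream; a self-contained sketch using only the JDK:

import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

class StreamSketch {
    // Wrap an Iterator in a lazy Stream; the onClose hook runs when the
    // Stream is closed (e.g. by try-with-resources), mirroring JsonLogsStream.
    static <T> Stream<T> streamOf(Iterator<T> iterator, Runnable onClose) {
        Spliterator<T> spliterator = Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED);
        return StreamSupport.stream(spliterator, false).onClose(onClose);
    }
}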

View File

@ -49,15 +49,14 @@ import org.opensearch.test.OpenSearchIntegTestCase;
* {@link OpenSearchIntegTestCase} then this information is the node name.
*/
@Plugin(category = PatternConverter.CATEGORY, name = "TestInfoPatternConverter")
@ConverterKeys({"test_thread_info"})
@ConverterKeys({ "test_thread_info" })
public class TestThreadInfoPatternConverter extends LogEventPatternConverter {
/**
* Called by log4j2 to initialize this converter.
*/
public static TestThreadInfoPatternConverter newInstance(final String[] options) {
if (options.length > 0) {
throw new IllegalArgumentException("no options supported but options provided: "
+ Arrays.toString(options));
throw new IllegalArgumentException("no options supported but options provided: " + Arrays.toString(options));
}
return new TestThreadInfoPatternConverter();
}
@ -74,14 +73,10 @@ public class TestThreadInfoPatternConverter extends LogEventPatternConverter {
}
}
private static final Pattern OPENSEARCH_THREAD_NAME_PATTERN =
Pattern.compile("opensearch\\[(.+)\\]\\[.+\\].+");
private static final Pattern TEST_THREAD_NAME_PATTERN =
Pattern.compile("TEST-.+\\.(.+)-seed#\\[.+\\]");
private static final Pattern TEST_SUITE_INIT_THREAD_NAME_PATTERN =
Pattern.compile("SUITE-.+-worker");
private static final Pattern NOT_YET_NAMED_NODE_THREAD_NAME_PATTERN =
Pattern.compile("test_SUITE-CHILD_VM.+cluster\\[T#(.+)\\]");
private static final Pattern OPENSEARCH_THREAD_NAME_PATTERN = Pattern.compile("opensearch\\[(.+)\\]\\[.+\\].+");
private static final Pattern TEST_THREAD_NAME_PATTERN = Pattern.compile("TEST-.+\\.(.+)-seed#\\[.+\\]");
private static final Pattern TEST_SUITE_INIT_THREAD_NAME_PATTERN = Pattern.compile("SUITE-.+-worker");
private static final Pattern NOT_YET_NAMED_NODE_THREAD_NAME_PATTERN = Pattern.compile("test_SUITE-CHILD_VM.+cluster\\[T#(.+)\\]");
static String threadInfo(String threadName) {
Matcher m = OPENSEARCH_THREAD_NAME_PATTERN.matcher(threadName);
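For a sense of what the first of those patterns captures, a small standalone check — the thread name below is a made-up example in the documented opensearch[node][pool] format, not taken from a real run:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class ThreadInfoSketch {
    public static void main(String[] args) {
        Pattern opensearchThread = Pattern.compile("opensearch\\[(.+)\\]\\[.+\\].+");
        // Group 1 captures the node name between the first pair of brackets.
        Matcher m = opensearchThread.matcher("opensearch[node_s0][generic][T#1]");
        if (m.matches()) {
            System.out.println(m.group(1)); // prints: node_s0
        }
    }
}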

View File

@ -57,7 +57,12 @@ public class OpenSearchIndexInputTestCase extends OpenSearchTestCase {
public static void createExecutor() {
final String name = "TEST-" + getTestClass().getSimpleName() + "#randomReadAndSlice";
executor = OpenSearchExecutors.newFixed(
name, 10, 0, OpenSearchExecutors.daemonThreadFactory(name), new ThreadContext(Settings.EMPTY));
name,
10,
0,
OpenSearchExecutors.daemonThreadFactory(name),
new ThreadContext(Settings.EMPTY)
);
}
@AfterClass

View File

@ -55,8 +55,7 @@ public class MockSecureSettings implements SecureSettings {
private Set<String> settingNames = new HashSet<>();
private final AtomicBoolean closed = new AtomicBoolean(false);
public MockSecureSettings() {
}
public MockSecureSettings() {}
private MockSecureSettings(MockSecureSettings source) {
secureStrings.putAll(source.secureStrings);

View File

@ -79,8 +79,10 @@ public class MockBigArrays extends BigArrays {
if (!masterCopy.isEmpty()) {
Iterator<Object> causes = masterCopy.values().iterator();
Object firstCause = causes.next();
RuntimeException exception = new RuntimeException(masterCopy.size() + " arrays have not been released",
firstCause instanceof Throwable ? (Throwable) firstCause : null);
RuntimeException exception = new RuntimeException(
masterCopy.size() + " arrays have not been released",
firstCause instanceof Throwable ? (Throwable) firstCause : null
);
while (causes.hasNext()) {
Object cause = causes.next();
if (cause instanceof Throwable) {
@ -114,7 +116,6 @@ public class MockBigArrays extends BigArrays {
random = new Random(seed);
}
@Override
public BigArrays withCircuitBreaking() {
return new MockBigArrays(this.recycler, this.breakerService, true);
@ -276,9 +277,12 @@ public class MockBigArrays extends BigArrays {
AbstractArrayWrapper(boolean clearOnResize) {
this.clearOnResize = clearOnResize;
this.originalRelease = new AtomicReference<>();
ACQUIRED_ARRAYS.put(this,
TRACK_ALLOCATIONS ? new RuntimeException("Unreleased array from test: " + LuceneTestCase.getTestClass().getName())
: Boolean.TRUE);
ACQUIRED_ARRAYS.put(
this,
TRACK_ALLOCATIONS
? new RuntimeException("Unreleased array from test: " + LuceneTestCase.getTestClass().getName())
: Boolean.TRUE
);
}
protected abstract BigArray getDelegate();
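The acquire/verify bookkeeping above generalizes to any pooled resource. A minimal sketch, assuming each acquisition records a stack-capturing Throwable the way MockBigArrays does when TRACK_ALLOCATIONS is set; the helper names are illustrative only:

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class LeakTrackerSketch {
    // One entry per live resource; the Throwable's stack trace points at the acquire site.
    static final Map<Object, Throwable> ACQUIRED = new ConcurrentHashMap<>();

    static void track(Object resource) {
        ACQUIRED.put(resource, new RuntimeException("Unreleased resource from test"));
    }

    static void release(Object resource) {
        ACQUIRED.remove(resource);
    }

    // At teardown: surface every leak as one failure, with the first leak as
    // the cause and the rest attached as suppressed exceptions.
    static void ensureAllReleased() {
        if (!ACQUIRED.isEmpty()) {
            Iterator<Throwable> causes = ACQUIRED.values().iterator();
            RuntimeException failure = new RuntimeException(ACQUIRED.size() + " resources have not been released", causes.next());
            while (causes.hasNext()) {
                failure.addSuppressed(causes.next());
            }
            throw failure;
        }
    }
}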

View File

@ -58,8 +58,7 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
// not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing pages, let's make sure that after a reasonable timeout, all master
// copies (snapshots) have been released
final boolean success =
waitUntil(() -> Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()));
final boolean success = waitUntil(() -> Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()));
if (!success) {
masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
@ -98,20 +97,20 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
}
final T ref = v();
if (ref instanceof Object[]) {
Arrays.fill((Object[])ref, 0, Array.getLength(ref), null);
Arrays.fill((Object[]) ref, 0, Array.getLength(ref), null);
} else if (ref instanceof byte[]) {
Arrays.fill((byte[])ref, 0, Array.getLength(ref), (byte) random.nextInt(256));
Arrays.fill((byte[]) ref, 0, Array.getLength(ref), (byte) random.nextInt(256));
} else if (ref instanceof long[]) {
Arrays.fill((long[])ref, 0, Array.getLength(ref), random.nextLong());
Arrays.fill((long[]) ref, 0, Array.getLength(ref), random.nextLong());
} else if (ref instanceof int[]) {
Arrays.fill((int[])ref, 0, Array.getLength(ref), random.nextInt());
Arrays.fill((int[]) ref, 0, Array.getLength(ref), random.nextInt());
} else if (ref instanceof double[]) {
Arrays.fill((double[])ref, 0, Array.getLength(ref), random.nextDouble() - 0.5);
Arrays.fill((double[]) ref, 0, Array.getLength(ref), random.nextDouble() - 0.5);
} else if (ref instanceof float[]) {
Arrays.fill((float[])ref, 0, Array.getLength(ref), random.nextFloat() - 0.5f);
Arrays.fill((float[]) ref, 0, Array.getLength(ref), random.nextFloat() - 0.5f);
} else {
for (int i = 0; i < Array.getLength(ref); ++i) {
Array.set(ref, i, (byte) random.nextInt(256));
Array.set(ref, i, (byte) random.nextInt(256));
}
}
v.close();
@ -134,7 +133,7 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
public V<byte[]> bytePage(boolean clear) {
final V<byte[]> page = super.bytePage(clear);
if (!clear) {
Arrays.fill(page.v(), 0, page.v().length, (byte)random.nextInt(1<<8));
Arrays.fill(page.v(), 0, page.v().length, (byte) random.nextInt(1 << 8));
}
return wrap(page);
}

View File

@ -40,13 +40,12 @@ import java.util.regex.Pattern;
* A formatter that allows named placeholders e.g. "%(param)" to be replaced.
*/
public class NamedFormatter {
private static final Pattern PARAM_REGEX = Pattern
.compile(
// Match either any backslash-escaped characters, or a "%(param)" pattern.
// COMMENTS is specified to allow whitespace in this pattern, for clarity
"\\\\(.) | (% \\( ([^)]+) \\) )",
Pattern.COMMENTS
);
private static final Pattern PARAM_REGEX = Pattern.compile(
// Match either any backslash-escaped characters, or a "%(param)" pattern.
// COMMENTS is specified to allow whitespace in this pattern, for clarity
"\\\\(.) | (% \\( ([^)]+) \\) )",
Pattern.COMMENTS
);
private NamedFormatter() {}
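A hedged sketch of how that PARAM_REGEX can drive substitution — group 1 is a backslash-escaped literal, group 3 the name inside "%(...)"; this replacement loop is an illustration, not necessarily the class's actual implementation:

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class NamedFormatterSketch {
    private static final Pattern PARAM_REGEX = Pattern.compile("\\\\(.) | (% \\( ([^)]+) \\) )", Pattern.COMMENTS);

    static String format(String fmt, Map<String, Object> values) {
        Matcher matcher = PARAM_REGEX.matcher(fmt);
        StringBuffer out = new StringBuffer();
        while (matcher.find()) {
            // Group 1: an escaped character, emitted literally.
            // Group 3: a parameter name, replaced by its value from the map.
            String replacement = matcher.group(1) != null
                ? matcher.group(1)
                : String.valueOf(values.get(matcher.group(3)));
            matcher.appendReplacement(out, Matcher.quoteReplacement(replacement));
        }
        matcher.appendTail(out);
        return out.toString();
    }
    // format("Hello, %(name)!", Map.of("name", "world")) -> "Hello, world!"
}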

View File

@ -41,8 +41,7 @@ import org.opensearch.common.settings.Settings;
*/
public class TestEnvironment {
private TestEnvironment() {
}
private TestEnvironment() {}
public static Environment newEnvironment(Settings settings) {
return new Environment(settings, null);

View File

@ -69,8 +69,11 @@ public class MockGatewayMetaState extends GatewayMetaState {
}
@Override
Metadata upgradeMetadataForNode(Metadata metadata, MetadataIndexUpgradeService metadataIndexUpgradeService,
MetadataUpgrader metadataUpgrader) {
Metadata upgradeMetadataForNode(
Metadata metadata,
MetadataIndexUpgradeService metadataIndexUpgradeService,
MetadataUpgrader metadataUpgrader
) {
// Metadata upgrade is tested in GatewayMetaStateTests, we override this method to NOP to make mocking easier
return metadata;
}
@ -85,16 +88,29 @@ public class MockGatewayMetaState extends GatewayMetaState {
final TransportService transportService = mock(TransportService.class);
when(transportService.getThreadPool()).thenReturn(mock(ThreadPool.class));
final ClusterService clusterService = mock(ClusterService.class);
when(clusterService.getClusterSettings())
.thenReturn(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
when(clusterService.getClusterSettings()).thenReturn(
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
);
final MetaStateService metaStateService = mock(MetaStateService.class);
try {
when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), Metadata.builder().build()));
} catch (IOException e) {
throw new AssertionError(e);
}
start(settings, transportService, clusterService, metaStateService,
null, null, new PersistedClusterStateService(nodeEnvironment, xContentRegistry, bigArrays,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L));
start(
settings,
transportService,
clusterService,
metaStateService,
null,
null,
new PersistedClusterStateService(
nodeEnvironment,
xContentRegistry,
bigArrays,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
() -> 0L
)
);
}
}

View File

@ -71,8 +71,12 @@ public class GeometryTestUtils {
public static Circle randomCircle(boolean hasAlt) {
if (hasAlt) {
return new Circle(randomLon(), randomLat(), OpenSearchTestCase.randomDouble(),
OpenSearchTestCase.randomDoubleBetween(0, 100, false));
return new Circle(
randomLon(),
randomLat(),
OpenSearchTestCase.randomDouble(),
OpenSearchTestCase.randomDoubleBetween(0, 100, false)
);
} else {
return new Circle(randomLon(), randomLat(), OpenSearchTestCase.randomDoubleBetween(0, 100, false));
}
@ -129,13 +133,12 @@ public class GeometryTestUtils {
final int numPts = lucenePolygon.numPoints() - 1;
for (int i = 0; i < numPts; i++) {
// compute signed area
windingSum += lucenePolygon.getPolyLon(i) * lucenePolygon.getPolyLat(i + 1) -
lucenePolygon.getPolyLat(i) * lucenePolygon.getPolyLon(i + 1);
windingSum += lucenePolygon.getPolyLon(i) * lucenePolygon.getPolyLat(i + 1) - lucenePolygon.getPolyLat(i) * lucenePolygon
.getPolyLon(i + 1);
}
return Math.abs(windingSum / 2);
return Math.abs(windingSum / 2);
}
private static double[] randomAltRing(int size) {
double[] alts = new double[size];
for (int i = 0; i < size - 1; i++) {
@ -145,7 +148,7 @@ public class GeometryTestUtils {
return alts;
}
public static LinearRing linearRing(double[] lons, double[] lats,boolean generateAlts) {
public static LinearRing linearRing(double[] lons, double[] lats, boolean generateAlts) {
if (generateAlts) {
return new LinearRing(lons, lats, randomAltRing(lats.length));
}
@ -202,7 +205,8 @@ public class GeometryTestUtils {
}
protected static Geometry randomGeometry(int level, boolean hasAlt) {
@SuppressWarnings("unchecked") Function<Boolean, Geometry> geometry = OpenSearchTestCase.randomFrom(
@SuppressWarnings("unchecked")
Function<Boolean, Geometry> geometry = OpenSearchTestCase.randomFrom(
GeometryTestUtils::randomCircle,
GeometryTestUtils::randomLine,
GeometryTestUtils::randomPoint,
@ -279,8 +283,12 @@ public class GeometryTestUtils {
@Override
public MultiPoint visit(Rectangle rectangle) throws RuntimeException {
return new MultiPoint(Arrays.asList(new Point(rectangle.getMinX(), rectangle.getMinY(), rectangle.getMinZ()),
new Point(rectangle.getMaxX(), rectangle.getMaxY(), rectangle.getMaxZ())));
return new MultiPoint(
Arrays.asList(
new Point(rectangle.getMinX(), rectangle.getMinY(), rectangle.getMinZ()),
new Point(rectangle.getMaxX(), rectangle.getMaxY(), rectangle.getMaxZ())
)
);
}
});
}
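The windingSum computed earlier in this file is the standard shoelace formula: the signed area of a ring with vertices (xi, yi) is half the sum of xi*y(i+1) - yi*x(i+1). A tiny standalone version for reference:

class ShoelaceSketch {
    // Shoelace formula: signed area of a closed ring; the sign encodes the
    // winding direction (positive for counter-clockwise). The code above
    // takes Math.abs(...) because it only needs the unsigned area.
    static double signedArea(double[] xs, double[] ys) {
        double sum = 0;
        for (int i = 0; i < xs.length; i++) {
            int j = (i + 1) % xs.length; // wrap around to close the ring
            sum += xs[i] * ys[j] - ys[i] * xs[j];
        }
        return sum / 2;
    }
    // Unit square (0,0)(1,0)(1,1)(0,1) -> signedArea == 1.0
}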

View File

@ -57,22 +57,26 @@ import static org.opensearch.test.OpenSearchTestCase.createTestAnalysis;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
public class MapperTestUtils {
public static MapperService newMapperService(NamedXContentRegistry xContentRegistry,
Path tempDir,
Settings indexSettings,
String indexName) throws IOException {
public static MapperService newMapperService(
NamedXContentRegistry xContentRegistry,
Path tempDir,
Settings indexSettings,
String indexName
) throws IOException {
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
return newMapperService(xContentRegistry, tempDir, indexSettings, indicesModule, indexName);
}
public static MapperService newMapperService(NamedXContentRegistry xContentRegistry, Path tempDir, Settings settings,
IndicesModule indicesModule, String indexName) throws IOException {
Settings.Builder settingsBuilder = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
.put(settings);
public static MapperService newMapperService(
NamedXContentRegistry xContentRegistry,
Path tempDir,
Settings settings,
IndicesModule indicesModule,
String indexName
) throws IOException {
Settings.Builder settingsBuilder = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), tempDir).put(settings);
if (settings.get(IndexMetadata.SETTING_VERSION_CREATED) == null) {
settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT);
}
@ -81,24 +85,28 @@ public class MapperTestUtils {
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers;
SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap());
return new MapperService(indexSettings,
return new MapperService(
indexSettings,
indexAnalyzers,
xContentRegistry,
similarityService,
mapperRegistry,
() -> null, () -> false, null);
() -> null,
() -> false,
null
);
}
public static void assertConflicts(String mapping1,
String mapping2,
DocumentMapperParser
parser, String... conflicts) throws IOException {
public static void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... conflicts)
throws IOException {
DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1));
if (conflicts.length == 0) {
docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE);
} else {
Exception e = expectThrows(IllegalArgumentException.class,
() -> docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE));
Exception e = expectThrows(
IllegalArgumentException.class,
() -> docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), MergeReason.MAPPING_UPDATE)
);
for (String conflict : conflicts) {
assertThat(e.getMessage(), containsString(conflict));
}

View File

@ -111,24 +111,24 @@ public final class RandomAliasActionsGenerator {
for (int i = 0; i < members; i++) {
Object value;
switch (between(0, 3)) {
case 0:
if (maxDepth > 0) {
value = randomMap(maxDepth - 1);
} else {
case 0:
if (maxDepth > 0) {
value = randomMap(maxDepth - 1);
} else {
value = randomAlphaOfLength(5);
}
break;
case 1:
value = randomAlphaOfLength(5);
}
break;
case 1:
value = randomAlphaOfLength(5);
break;
case 2:
value = randomBoolean();
break;
case 3:
value = randomLong();
break;
default:
throw new UnsupportedOperationException();
break;
case 2:
value = randomBoolean();
break;
case 3:
value = randomLong();
break;
default:
throw new UnsupportedOperationException();
}
result.put(randomAlphaOfLength(5), value);
}

View File

@ -48,26 +48,29 @@ import java.util.Arrays;
public class AnalysisTestsHelper {
public static OpenSearchTestCase.TestAnalysis createTestAnalysisFromClassPath(final Path baseDir,
final String resource,
final AnalysisPlugin... plugins) throws IOException {
public static OpenSearchTestCase.TestAnalysis createTestAnalysisFromClassPath(
final Path baseDir,
final String resource,
final AnalysisPlugin... plugins
) throws IOException {
final Settings settings = Settings.builder()
.loadFromStream(resource, AnalysisTestsHelper.class.getResourceAsStream(resource), false)
.put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString())
.build();
.loadFromStream(resource, AnalysisTestsHelper.class.getResourceAsStream(resource), false)
.put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString())
.build();
return createTestAnalysisFromSettings(settings, plugins);
}
public static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(
final Settings settings, final AnalysisPlugin... plugins) throws IOException {
public static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(final Settings settings, final AnalysisPlugin... plugins)
throws IOException {
return createTestAnalysisFromSettings(settings, null, plugins);
}
public static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(
final Settings settings,
final Path configPath,
final AnalysisPlugin... plugins) throws IOException {
final Settings settings,
final Path configPath,
final AnalysisPlugin... plugins
) throws IOException {
final Settings actualSettings;
if (settings.get(IndexMetadata.SETTING_VERSION_CREATED) == null) {
actualSettings = Settings.builder().put(settings).put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build();
@ -75,12 +78,14 @@ public class AnalysisTestsHelper {
actualSettings = settings;
}
final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", actualSettings);
final AnalysisRegistry analysisRegistry =
new AnalysisModule(new Environment(actualSettings, configPath), Arrays.asList(plugins)).getAnalysisRegistry();
return new OpenSearchTestCase.TestAnalysis(analysisRegistry.build(indexSettings),
analysisRegistry.buildTokenFilterFactories(indexSettings),
analysisRegistry.buildTokenizerFactories(indexSettings),
analysisRegistry.buildCharFilterFactories(indexSettings));
final AnalysisRegistry analysisRegistry = new AnalysisModule(new Environment(actualSettings, configPath), Arrays.asList(plugins))
.getAnalysisRegistry();
return new OpenSearchTestCase.TestAnalysis(
analysisRegistry.build(indexSettings),
analysisRegistry.buildTokenFilterFactories(indexSettings),
analysisRegistry.buildTokenizerFactories(indexSettings),
analysisRegistry.buildCharFilterFactories(indexSettings)
);
}
}

View File

@ -32,7 +32,6 @@
package org.opensearch.index.engine;
import org.apache.lucene.util.BytesRef;
import java.util.Objects;
@ -78,8 +77,11 @@ public final class DocIdSeqNoAndSource {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DocIdSeqNoAndSource that = (DocIdSeqNoAndSource) o;
return Objects.equals(id, that.id) && Objects.equals(source, that.source)
&& seqNo == that.seqNo && primaryTerm == that.primaryTerm && version == that.version;
return Objects.equals(id, that.id)
&& Objects.equals(source, that.source)
&& seqNo == that.seqNo
&& primaryTerm == that.primaryTerm
&& version == that.version;
}
@Override
@ -89,7 +91,17 @@ public final class DocIdSeqNoAndSource {
@Override
public String toString() {
return "doc{" + "id='" + id + " seqNo=" + seqNo + " primaryTerm=" + primaryTerm
+ " version=" + version + " source= " + (source != null ? source.utf8ToString() : null) + "}";
return "doc{"
+ "id='"
+ id
+ " seqNo="
+ seqNo
+ " primaryTerm="
+ primaryTerm
+ " version="
+ version
+ " source= "
+ (source != null ? source.utf8ToString() : null)
+ "}";
}
}

View File

@ -50,8 +50,11 @@ class InternalTestEngine extends InternalEngine {
super(engineConfig);
}
InternalTestEngine(EngineConfig engineConfig, int maxDocs,
BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier) {
InternalTestEngine(
EngineConfig engineConfig,
int maxDocs,
BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier
) {
super(engineConfig, maxDocs, localCheckpointTrackerSupplier);
}

View File

@ -80,8 +80,16 @@ public class TranslogHandler implements Engine.TranslogRecoveryRunner {
IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, emptyMap(), emptyMap());
SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap());
MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry();
mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry,
() -> null, () -> false, null);
mapperService = new MapperService(
indexSettings,
indexAnalyzers,
xContentRegistry,
similarityService,
mapperRegistry,
() -> null,
() -> false,
null
);
}
private DocumentMapperForType docMapper(String type) {
@ -96,8 +104,10 @@ public class TranslogHandler implements Engine.TranslogRecoveryRunner {
Engine.Index engineIndex = (Engine.Index) operation;
Mapping update = engineIndex.parsedDoc().dynamicMappingsUpdate();
if (engineIndex.parsedDoc().dynamicMappingsUpdate() != null) {
recoveredTypes.compute(engineIndex.type(), (k, mapping) ->
mapping == null ? update : mapping.merge(update, MapperService.MergeReason.MAPPING_RECOVERY));
recoveredTypes.compute(
engineIndex.type(),
(k, mapping) -> mapping == null ? update : mapping.merge(update, MapperService.MergeReason.MAPPING_RECOVERY)
);
}
engine.index(engineIndex);
break;
@ -139,21 +149,46 @@ public class TranslogHandler implements Engine.TranslogRecoveryRunner {
case INDEX:
final Translog.Index index = (Translog.Index) operation;
final String indexName = mapperService.index().getName();
final Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()),
new SourceToParse(indexName, index.type(), index.id(), index.source(), XContentHelper.xContentType(index.source()),
index.routing()), index.seqNo(), index.primaryTerm(), index.version(), versionType, origin,
index.getAutoGeneratedIdTimestamp(), true, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
final Engine.Index engineIndex = IndexShard.prepareIndex(
docMapper(index.type()),
new SourceToParse(
indexName,
index.type(),
index.id(),
index.source(),
XContentHelper.xContentType(index.source()),
index.routing()
),
index.seqNo(),
index.primaryTerm(),
index.version(),
versionType,
origin,
index.getAutoGeneratedIdTimestamp(),
true,
SequenceNumbers.UNASSIGNED_SEQ_NO,
SequenceNumbers.UNASSIGNED_PRIMARY_TERM
);
return engineIndex;
case DELETE:
final Translog.Delete delete = (Translog.Delete) operation;
final Engine.Delete engineDelete = new Engine.Delete(delete.type(), delete.id(), delete.uid(), delete.seqNo(),
delete.primaryTerm(), delete.version(), versionType, origin, System.nanoTime(),
SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
final Engine.Delete engineDelete = new Engine.Delete(
delete.type(),
delete.id(),
delete.uid(),
delete.seqNo(),
delete.primaryTerm(),
delete.version(),
versionType,
origin,
System.nanoTime(),
SequenceNumbers.UNASSIGNED_SEQ_NO,
SequenceNumbers.UNASSIGNED_PRIMARY_TERM
);
return engineDelete;
case NO_OP:
final Translog.NoOp noOp = (Translog.NoOp) operation;
final Engine.NoOp engineNoOp =
new Engine.NoOp(noOp.seqNo(), noOp.primaryTerm(), origin, System.nanoTime(), noOp.reason());
final Engine.NoOp engineNoOp = new Engine.NoOp(noOp.seqNo(), noOp.primaryTerm(), origin, System.nanoTime(), noOp.reason());
return engineNoOp;
default:
throw new IllegalStateException("No operation defined for [" + operation + "]");

View File

@ -38,6 +38,7 @@ import static org.hamcrest.Matchers.hasItem;
public abstract class AbstractNumericFieldMapperTestCase extends MapperTestCase {
protected abstract Set<String> types();
protected abstract Set<String> wholeTypes();
public final void testTypesAndWholeTypes() {

View File

@ -66,9 +66,7 @@ import static org.hamcrest.Matchers.equalTo;
@Deprecated
public abstract class FieldMapperTestCase<T extends FieldMapper.Builder<?>> extends OpenSearchSingleNodeTestCase {
protected final Settings SETTINGS = Settings.builder()
.put("index.version.created", Version.CURRENT)
.build();
protected final Settings SETTINGS = Settings.builder().put("index.version.created", Version.CURRENT).build();
private final class Modifier {
final String property;
@ -97,19 +95,16 @@ public abstract class FieldMapperTestCase<T extends FieldMapper.Builder<?>> exte
return Collections.emptySet();
}
private final List<Modifier> modifiers = new ArrayList<>(Arrays.asList(
new Modifier("analyzer", false, (a, b) -> {
a.indexAnalyzer(new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));
a.indexAnalyzer(new NamedAnalyzer("keyword", AnalyzerScope.INDEX, new KeywordAnalyzer()));
}),
new Modifier("boost", true, (a, b) -> {
a.boost(1.1f);
b.boost(1.2f);
}),
new Modifier("doc_values", false, (a, b) -> {
a.docValues(true);
b.docValues(false);
}),
private final List<Modifier> modifiers = new ArrayList<>(Arrays.asList(new Modifier("analyzer", false, (a, b) -> {
a.indexAnalyzer(new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));
a.indexAnalyzer(new NamedAnalyzer("keyword", AnalyzerScope.INDEX, new KeywordAnalyzer()));
}), new Modifier("boost", true, (a, b) -> {
a.boost(1.1f);
b.boost(1.2f);
}), new Modifier("doc_values", false, (a, b) -> {
a.docValues(true);
b.docValues(false);
}),
booleanModifier("eager_global_ordinals", true, (a, t) -> a.setEagerGlobalOrdinals(t)),
booleanModifier("index", false, (a, t) -> a.index(t)),
booleanModifier("norms", false, FieldMapper.Builder::omitNorms),
@ -209,8 +204,11 @@ public abstract class FieldMapperTestCase<T extends FieldMapper.Builder<?>> exte
if (modifier.updateable) {
mapper.merge(toMerge);
} else {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
"Expected an error when merging property difference " + modifier.property, () -> mapper.merge(toMerge));
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
"Expected an error when merging property difference " + modifier.property,
() -> mapper.merge(toMerge)
);
assertThat(e.getMessage(), containsString(modifier.property));
}
}
@ -255,8 +253,9 @@ public abstract class FieldMapperTestCase<T extends FieldMapper.Builder<?>> exte
}
private String mappingsToString(ToXContent builder, boolean includeDefaults) throws IOException {
ToXContent.Params params = includeDefaults ?
new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")) : ToXContent.EMPTY_PARAMS;
ToXContent.Params params = includeDefaults
? new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"))
: ToXContent.EMPTY_PARAMS;
XContentBuilder x = JsonXContent.contentBuilder();
x.startObject().startObject("properties");
builder.toXContent(x, params);

View File

@ -82,8 +82,9 @@ public abstract class MapperServiceTestCase extends OpenSearchTestCase {
protected static final Settings SETTINGS = Settings.builder().put("index.version.created", Version.CURRENT).build();
protected static final ToXContent.Params INCLUDE_DEFAULTS
= new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"));
protected static final ToXContent.Params INCLUDE_DEFAULTS = new ToXContent.MapParams(
Collections.singletonMap("include_defaults", "true")
);
protected Collection<? extends Plugin> getPlugins() {
return emptyList();
@ -206,9 +207,7 @@ public abstract class MapperServiceTestCase extends OpenSearchTestCase {
/**
* Merge a new mapping into the one in the provided {@link MapperService} with a specific {@code MergeReason}
*/
protected final void merge(MapperService mapperService,
MapperService.MergeReason reason,
XContentBuilder mapping) throws IOException {
protected final void merge(MapperService mapperService, MapperService.MergeReason reason, XContentBuilder mapping) throws IOException {
mapperService.merge("_doc", new CompressedXContent(BytesReference.bytes(mapping)), reason);
}
@ -250,12 +249,9 @@ public abstract class MapperServiceTestCase extends OpenSearchTestCase {
inv -> mapperService.simpleMatchToFullName(inv.getArguments()[0].toString())
);
when(queryShardContext.allowExpensiveQueries()).thenReturn(true);
when(queryShardContext.lookup()).thenReturn(new SearchLookup(
mapperService,
(ft, s) -> {
throw new UnsupportedOperationException("search lookup not available");
},
null));
when(queryShardContext.lookup()).thenReturn(
new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); }, null)
);
return queryShardContext;
}
}

View File

@ -130,7 +130,7 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
assertThat(query, instanceOf(TermQuery.class));
TermQuery termQuery = (TermQuery) query;
assertEquals(FieldNamesFieldMapper.NAME, termQuery.getTerm().field());
//we always perform a term query against _field_names, even when the field
// we always perform a term query against _field_names, even when the field
// is not added to _field_names because it is not indexed nor stored
assertEquals("field", termQuery.getTerm().text());
assertNoDocValuesField(fields, "field");
@ -229,12 +229,10 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
public void testMeta() throws IOException {
assumeTrue("Field doesn't support meta", supportsMeta());
XContentBuilder mapping = fieldMapping(
b -> {
metaMapping(b);
b.field("meta", Collections.singletonMap("foo", "bar"));
}
);
XContentBuilder mapping = fieldMapping(b -> {
metaMapping(b);
b.field("meta", Collections.singletonMap("foo", "bar"));
});
MapperService mapperService = createMapperService(mapping);
assertEquals(
XContentHelper.convertToMap(BytesReference.bytes(mapping), false, mapping.contentType()).v2(),
@ -288,22 +286,22 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
throws IOException {
BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup = (mft, lookupSource) -> mft
.fielddataBuilder("test", () -> {
throw new UnsupportedOperationException();
})
.fielddataBuilder("test", () -> { throw new UnsupportedOperationException(); })
.build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService());
SetOnce<List<?>> result = new SetOnce<>();
withLuceneIndex(mapperService, iw -> {
iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc());
}, iw -> {
SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup, null);
ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft));
IndexSearcher searcher = newSearcher(iw);
LeafReaderContext context = searcher.getIndexReader().leaves().get(0);
lookup.source().setSegmentAndDocument(context, 0);
valueFetcher.setNextReader(context);
result.set(valueFetcher.fetchValues(lookup.source()));
});
withLuceneIndex(
mapperService,
iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); },
iw -> {
SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup, null);
ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft));
IndexSearcher searcher = newSearcher(iw);
LeafReaderContext context = searcher.getIndexReader().leaves().get(0);
lookup.source().setSegmentAndDocument(context, 0);
valueFetcher.setNextReader(context);
result.set(valueFetcher.fetchValues(lookup.source()));
}
);
return result.get();
}
@ -312,8 +310,7 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
final XContentBuilder update;
final Consumer<FieldMapper> check;
private UpdateCheck(CheckedConsumer<XContentBuilder, IOException> update,
Consumer<FieldMapper> check) throws IOException {
private UpdateCheck(CheckedConsumer<XContentBuilder, IOException> update, Consumer<FieldMapper> check) throws IOException {
this.init = fieldMapping(MapperTestCase.this::minimalMapping);
this.update = fieldMapping(b -> {
minimalMapping(b);
@ -322,9 +319,11 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
this.check = check;
}
private UpdateCheck(CheckedConsumer<XContentBuilder, IOException> init,
CheckedConsumer<XContentBuilder, IOException> update,
Consumer<FieldMapper> check) throws IOException {
private UpdateCheck(
CheckedConsumer<XContentBuilder, IOException> init,
CheckedConsumer<XContentBuilder, IOException> update,
Consumer<FieldMapper> check
) throws IOException {
this.init = fieldMapping(init);
this.update = fieldMapping(update);
this.check = check;
@ -352,8 +351,8 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
* @param update a field builder applied on top of the minimal mapping
* @param check a check that the updated parameter has been applied to the FieldMapper
*/
public void registerUpdateCheck(CheckedConsumer<XContentBuilder, IOException> update,
Consumer<FieldMapper> check) throws IOException {
public void registerUpdateCheck(CheckedConsumer<XContentBuilder, IOException> update, Consumer<FieldMapper> check)
throws IOException {
updateChecks.add(new UpdateCheck(update, check));
}
@ -364,9 +363,11 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
* @param update the updated mapping
* @param check a check that the updated parameter has been applied to the FieldMapper
*/
public void registerUpdateCheck(CheckedConsumer<XContentBuilder, IOException> init,
CheckedConsumer<XContentBuilder, IOException> update,
Consumer<FieldMapper> check) throws IOException {
public void registerUpdateCheck(
CheckedConsumer<XContentBuilder, IOException> init,
CheckedConsumer<XContentBuilder, IOException> update,
Consumer<FieldMapper> check
) throws IOException {
updateChecks.add(new UpdateCheck(init, update, check));
}
@ -377,13 +378,10 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
* @param update a field builder applied on top of the minimal mapping
*/
public void registerConflictCheck(String param, CheckedConsumer<XContentBuilder, IOException> update) throws IOException {
conflictChecks.put(param, new ConflictCheck(
fieldMapping(MapperTestCase.this::minimalMapping),
fieldMapping(b -> {
minimalMapping(b);
update.accept(b);
})
));
conflictChecks.put(param, new ConflictCheck(fieldMapping(MapperTestCase.this::minimalMapping), fieldMapping(b -> {
minimalMapping(b);
update.accept(b);
})));
}
/**
@ -419,12 +417,15 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
// merging the same change is fine
merge(mapperService, checker.conflictChecks.get(param).init);
// merging the conflicting update should throw an exception
Exception e = expectThrows(IllegalArgumentException.class,
Exception e = expectThrows(
IllegalArgumentException.class,
"No conflict when updating parameter [" + param + "]",
() -> merge(mapperService, checker.conflictChecks.get(param).update));
assertThat(e.getMessage(), anyOf(
containsString("Cannot update parameter [" + param + "]"),
containsString("different [" + param + "]")));
() -> merge(mapperService, checker.conflictChecks.get(param).update)
);
assertThat(
e.getMessage(),
anyOf(containsString("Cannot update parameter [" + param + "]"), containsString("different [" + param + "]"))
);
}
assertParseMaximalWarnings();
}

View File

@ -42,7 +42,7 @@ public class MockFieldFilterPlugin extends Plugin implements MapperPlugin {
@Override
public Function<String, Predicate<String>> getFieldFilter() {
//this filter doesn't filter any field out, but it's used to exercise the code path executed when the filter is not no-op
// this filter doesn't filter any field out, but it's used to exercise the code path executed when the filter is not no-op
return index -> field -> true;
}
}

View File

@ -42,23 +42,17 @@ import java.util.List;
// this sucks how much must be overridden just to get a dummy field mapper...
public class MockFieldMapper extends ParametrizedFieldMapper {
static Settings DEFAULT_SETTINGS = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id)
.build();
static Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build();
public MockFieldMapper(String fullName) {
this(new FakeFieldType(fullName));
}
public MockFieldMapper(MappedFieldType fieldType) {
super(findSimpleName(fieldType.name()), fieldType,
MultiFields.empty(), new CopyTo.Builder().build());
super(findSimpleName(fieldType.name()), fieldType, MultiFields.empty(), new CopyTo.Builder().build());
}
public MockFieldMapper(String fullName,
MappedFieldType fieldType,
MultiFields multifields,
CopyTo copyTo) {
public MockFieldMapper(String fullName, MappedFieldType fieldType, MultiFields multifields, CopyTo copyTo) {
super(findSimpleName(fullName), fieldType, multifields, copyTo);
}
@ -94,8 +88,7 @@ public class MockFieldMapper extends ParametrizedFieldMapper {
}
@Override
protected void parseCreateField(ParseContext context) {
}
protected void parseCreateField(ParseContext context) {}
public static class Builder extends ParametrizedFieldMapper.Builder {
private final MappedFieldType fieldType;

View File

@ -43,9 +43,8 @@ import org.junit.Before;
import java.util.Collections;
public abstract class AbstractAsyncBulkByScrollActionTestCase<
Request extends AbstractBulkByScrollRequest<Request>,
Response extends BulkByScrollResponse>
extends OpenSearchTestCase {
Request extends AbstractBulkByScrollRequest<Request>,
Response extends BulkByScrollResponse> extends OpenSearchTestCase {
protected ThreadPool threadPool;
protected BulkByScrollTask task;

View File

@ -148,17 +148,18 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, Map<String, String> mappings) throws IOException {
Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean())
.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000))
.put(
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)
)
.put(indexSettings)
.build();
IndexMetadata.Builder metadata = IndexMetadata.builder(index.getName())
.settings(settings)
.primaryTerm(0, randomIntBetween(1, 100));
IndexMetadata.Builder metadata = IndexMetadata.builder(index.getName()).settings(settings).primaryTerm(0, randomIntBetween(1, 100));
for (Map.Entry<String, String> typeMapping : mappings.entrySet()) {
metadata.putMapping(typeMapping.getKey(), typeMapping.getValue());
}
@ -175,8 +176,14 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
protected DiscoveryNode getDiscoveryNode(String id) {
return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(),
Collections.singleton(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT);
return new DiscoveryNode(
id,
id,
buildNewFakeTransportAddress(),
Collections.emptyMap(),
Collections.singleton(DiscoveryNodeRole.DATA_ROLE),
Version.CURRENT
);
}
protected class ReplicationGroup implements AutoCloseable, Iterable<IndexShard> {
@ -196,22 +203,25 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
} catch (Exception e) {
throw new AssertionError(e);
}
});
}
);
private final RetentionLeaseSyncer retentionLeaseSyncer = new RetentionLeaseSyncer(
(shardId, primaryAllocationId, primaryTerm, retentionLeases, listener) ->
syncRetentionLeases(shardId, retentionLeases, listener),
(shardId, primaryAllocationId, primaryTerm, retentionLeases) -> syncRetentionLeases(shardId, retentionLeases,
ActionListener.wrap(
r -> { },
e -> {
throw new AssertionError("failed to background sync retention lease", e);
})));
(shardId, primaryAllocationId, primaryTerm, retentionLeases, listener) -> syncRetentionLeases(
shardId,
retentionLeases,
listener
),
(shardId, primaryAllocationId, primaryTerm, retentionLeases) -> syncRetentionLeases(
shardId,
retentionLeases,
ActionListener.wrap(r -> {}, e -> { throw new AssertionError("failed to background sync retention lease", e); })
)
);
protected ReplicationGroup(final IndexMetadata indexMetadata) throws IOException {
final ShardRouting primaryRouting = this.createShardRouting("s0", true);
primary = newShard(
primaryRouting, indexMetadata, null, getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer);
primary = newShard(primaryRouting, indexMetadata, null, getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer);
replicas = new CopyOnWriteArrayList<>();
this.indexMetadata = indexMetadata;
updateAllocationIDsOnPrimary();
@ -221,8 +231,13 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
private ShardRouting createShardRouting(String nodeId, boolean primary) {
return TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING,
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
return TestShardRouting.newShardRouting(
shardId,
nodeId,
primary,
ShardRoutingState.INITIALIZING,
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
);
}
protected EngineFactory getEngineFactory(ShardRouting routing) {
@ -236,7 +251,7 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
public int indexDocs(final int numOfDoc) throws Exception {
for (int doc = 0; doc < numOfDoc; doc++) {
final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", Integer.toString(docId.incrementAndGet()))
.source("{}", XContentType.JSON);
.source("{}", XContentType.JSON);
final BulkItemResponse response = index(indexRequest);
if (response.isFailed()) {
throw response.getFailure().getCause();
@ -268,11 +283,13 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
return executeWriteRequest(deleteRequest, deleteRequest.getRefreshPolicy());
}
private BulkItemResponse executeWriteRequest(
DocWriteRequest<?> writeRequest, WriteRequest.RefreshPolicy refreshPolicy) throws Exception {
private BulkItemResponse executeWriteRequest(DocWriteRequest<?> writeRequest, WriteRequest.RefreshPolicy refreshPolicy)
throws Exception {
PlainActionFuture<BulkItemResponse> listener = new PlainActionFuture<>();
final ActionListener<BulkShardResponse> wrapBulkListener =
ActionListener.map(listener, bulkShardResponse -> bulkShardResponse.getResponses()[0]);
final ActionListener<BulkShardResponse> wrapBulkListener = ActionListener.map(
listener,
bulkShardResponse -> bulkShardResponse.getResponses()[0]
);
BulkItemRequest[] items = new BulkItemRequest[1];
items[0] = new BulkItemRequest(0, writeRequest);
BulkShardRequest request = new BulkShardRequest(shardId, refreshPolicy, items);
@ -309,8 +326,14 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
activeIds.add(primary.routingEntry().allocationId().getId());
ShardRouting startedRoutingEntry = ShardRoutingHelper.moveToStarted(primary.routingEntry());
IndexShardRoutingTable routingTable = routingTable(shr -> shr == primary.routingEntry() ? startedRoutingEntry : shr);
primary.updateShardState(startedRoutingEntry, primary.getPendingPrimaryTerm(), null,
currentClusterStateVersion.incrementAndGet(), activeIds, routingTable);
primary.updateShardState(
startedRoutingEntry,
primary.getPendingPrimaryTerm(),
null,
currentClusterStateVersion.incrementAndGet(),
activeIds,
routingTable
);
for (final IndexShard replica : replicas) {
recoverReplica(replica);
}
@ -319,15 +342,23 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
public IndexShard addReplica() throws IOException {
final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false);
final IndexShard replica =
newShard(replicaRouting, indexMetadata, null, getEngineFactory(replicaRouting), () -> {}, retentionLeaseSyncer);
final IndexShard replica = newShard(
replicaRouting,
indexMetadata,
null,
getEngineFactory(replicaRouting),
() -> {},
retentionLeaseSyncer
);
addReplica(replica);
return replica;
}
public synchronized void addReplica(IndexShard replica) throws IOException {
assert shardRoutings().stream().anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false :
"replica with aId [" + replica.routingEntry().allocationId() + "] already exists";
assert shardRoutings().stream()
.anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false : "replica with aId ["
+ replica.routingEntry().allocationId()
+ "] already exists";
replicas.add(replica);
if (replicationTargets != null) {
replicationTargets.addReplica(replica);
@ -343,15 +374,25 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException {
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
shardId,
nodeId,
false, ShardRoutingState.INITIALIZING,
RecoverySource.PeerRecoverySource.INSTANCE);
shardId,
nodeId,
false,
ShardRoutingState.INITIALIZING,
RecoverySource.PeerRecoverySource.INSTANCE
);
final IndexShard newReplica =
newShard(shardRouting, shardPath, indexMetadata, null, null, getEngineFactory(shardRouting),
getEngineConfigFactory(new IndexSettings(indexMetadata, indexMetadata.getSettings())),
() -> {}, retentionLeaseSyncer, EMPTY_EVENT_LISTENER);
final IndexShard newReplica = newShard(
shardRouting,
shardPath,
indexMetadata,
null,
null,
getEngineFactory(shardRouting),
getEngineConfigFactory(new IndexSettings(indexMetadata, indexMetadata.getSettings())),
() -> {},
retentionLeaseSyncer,
EMPTY_EVENT_LISTENER
);
replicas.add(newReplica);
if (replicationTargets != null) {
replicationTargets.addReplica(newReplica);
@ -371,27 +412,27 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<>();
promoteReplicaToPrimary(replica, (shard, listener) -> {
computeReplicationTargets();
primaryReplicaSyncer.resync(shard,
new ActionListener<PrimaryReplicaSyncer.ResyncTask>() {
@Override
public void onResponse(PrimaryReplicaSyncer.ResyncTask resyncTask) {
listener.onResponse(resyncTask);
fut.onResponse(resyncTask);
}
primaryReplicaSyncer.resync(shard, new ActionListener<PrimaryReplicaSyncer.ResyncTask>() {
@Override
public void onResponse(PrimaryReplicaSyncer.ResyncTask resyncTask) {
listener.onResponse(resyncTask);
fut.onResponse(resyncTask);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
fut.onFailure(e);
}
});
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
fut.onFailure(e);
}
});
});
return fut;
}
public synchronized void promoteReplicaToPrimary(IndexShard replica,
BiConsumer<IndexShard, ActionListener<PrimaryReplicaSyncer.ResyncTask>> primaryReplicaSyncer)
throws IOException {
public synchronized void promoteReplicaToPrimary(
IndexShard replica,
BiConsumer<IndexShard, ActionListener<PrimaryReplicaSyncer.ResyncTask>> primaryReplicaSyncer
) throws IOException {
final long newTerm = indexMetadata.primaryTerm(shardId.id()) + 1;
IndexMetadata.Builder newMetadata = IndexMetadata.builder(indexMetadata).primaryTerm(shardId.id(), newTerm);
indexMetadata = newMetadata.build();
@ -402,13 +443,22 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
ShardRouting primaryRouting = replica.routingEntry().moveActiveReplicaToPrimary();
IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? primaryRouting : shr);
primary.updateShardState(primaryRouting, newTerm, primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(),
activeIds(), routingTable);
primary.updateShardState(
primaryRouting,
newTerm,
primaryReplicaSyncer,
currentClusterStateVersion.incrementAndGet(),
activeIds(),
routingTable
);
}
private synchronized Set<String> activeIds() {
return shardRoutings().stream()
.filter(ShardRouting::active).map(ShardRouting::allocationId).map(AllocationId::getId).collect(Collectors.toSet());
.filter(ShardRouting::active)
.map(ShardRouting::allocationId)
.map(AllocationId::getId)
.collect(Collectors.toSet());
}
private synchronized IndexShardRoutingTable routingTable(Function<ShardRouting, ShardRouting> transformer) {
@ -438,11 +488,18 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
public void recoverReplica(
IndexShard replica,
BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
boolean markAsRecovering) throws IOException {
boolean markAsRecovering
) throws IOException {
final IndexShardRoutingTable routingTable = routingTable(Function.identity());
final Set<String> inSyncIds = activeIds();
OpenSearchIndexLevelReplicationTestCase.this.recoverUnstartedReplica(
replica, primary, targetSupplier, markAsRecovering, inSyncIds, routingTable);
replica,
primary,
targetSupplier,
markAsRecovering,
inSyncIds,
routingTable
);
OpenSearchIndexLevelReplicationTestCase.this.startReplicaAfterRecovery(replica, primary, inSyncIds, routingTable);
computeReplicationTargets();
}
@ -452,7 +509,9 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
public Future<Void> asyncRecoverReplica(
final IndexShard replica, final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier) {
final IndexShard replica,
final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier
) {
final FutureTask<Void> task = new FutureTask<>(() -> {
recoverReplica(replica, targetSupplier);
return null;
@ -503,7 +562,7 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
assertThat(replica.getMaxSeqNoOfUpdatesOrDeletes(), greaterThanOrEqualTo(primary.getMaxSeqNoOfUpdatesOrDeletes()));
assertThat(getDocIdAndSeqNos(replica), equalTo(docsOnPrimary));
}
} catch (AlreadyClosedException ignored) { }
} catch (AlreadyClosedException ignored) {}
closeShards(this);
} else {
throw new AlreadyClosedException("too bad");
@ -536,9 +595,14 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
private void updateAllocationIDsOnPrimary() throws IOException {
primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
primary.updateShardState(
primary.routingEntry(),
primary.getPendingPrimaryTerm(),
null,
currentClusterStateVersion.incrementAndGet(),
activeIds(), routingTable(Function.identity()));
activeIds(),
routingTable(Function.identity())
);
}
private synchronized void computeReplicationTargets() {
@ -550,12 +614,19 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener<ReplicationResponse> listener) {
new SyncRetentionLeases(new RetentionLeaseSyncAction.Request(shardId, leases), this,
ActionListener.map(listener, r -> new ReplicationResponse())).execute();
new SyncRetentionLeases(
new RetentionLeaseSyncAction.Request(shardId, leases),
this,
ActionListener.map(listener, r -> new ReplicationResponse())
).execute();
}
public synchronized RetentionLease addRetentionLease(String id, long retainingSequenceNumber, String source,
ActionListener<ReplicationResponse> listener) {
public synchronized RetentionLease addRetentionLease(
String id,
long retainingSequenceNumber,
String source,
ActionListener<ReplicationResponse> listener
) {
return getPrimary().addRetentionLease(id, retainingSequenceNumber, source, listener);
}
@ -569,8 +640,14 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
public void executeRetentionLeasesSyncRequestOnReplica(RetentionLeaseSyncAction.Request request, IndexShard replica) {
final PlainActionFuture<Releasable> acquirePermitFuture = new PlainActionFuture<>();
replica.acquireReplicaOperationPermit(getPrimary().getOperationPrimaryTerm(), getPrimary().getLastKnownGlobalCheckpoint(),
getPrimary().getMaxSeqNoOfUpdatesOrDeletes(), acquirePermitFuture, ThreadPool.Names.SAME, request);
replica.acquireReplicaOperationPermit(
getPrimary().getOperationPrimaryTerm(),
getPrimary().getLastKnownGlobalCheckpoint(),
getPrimary().getMaxSeqNoOfUpdatesOrDeletes(),
acquirePermitFuture,
ThreadPool.Names.SAME,
request
);
try (Releasable ignored = acquirePermitFuture.actionGet()) {
replica.updateRetentionLeasesOnReplica(request.getRetentionLeases());
replica.persistRetentionLeases();
@ -608,7 +685,8 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
}
protected abstract class ReplicationAction<Request extends ReplicationRequest<Request>,
protected abstract class ReplicationAction<
Request extends ReplicationRequest<Request>,
ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
Response extends ReplicationResponse> {
private final Request request;
@ -625,14 +703,18 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
public void execute() {
try {
new ReplicationOperation<>(request, new PrimaryRef(),
ActionListener.map(listener, result -> {
adaptResponse(result.finalResponse, getPrimaryShard());
return result.finalResponse;
}),
new ReplicasRef(), logger, threadPool, opType, primaryTerm, TimeValue.timeValueMillis(20),
TimeValue.timeValueSeconds(60))
.execute();
new ReplicationOperation<>(request, new PrimaryRef(), ActionListener.map(listener, result -> {
adaptResponse(result.finalResponse, getPrimaryShard());
return result.finalResponse;
}),
new ReplicasRef(),
logger,
threadPool,
opType,
primaryTerm,
TimeValue.timeValueMillis(20),
TimeValue.timeValueSeconds(60)
).execute();
} catch (Exception e) {
listener.onFailure(e);
}
@ -718,7 +800,8 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
final long primaryTerm,
final long globalCheckpoint,
final long maxSeqNoOfUpdatesOrDeletes,
final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
final ActionListener<ReplicationOperation.ReplicaResponse> listener
) {
IndexShard replica = replicationTargets.findReplicaShard(replicaRouting);
replica.acquireReplicaOperationPermit(
getPrimaryShard().getPendingPrimaryTerm(),
@ -728,26 +811,38 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
try {
performOnReplica(request, replica);
releasable.close();
delegatedListener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint(),
replica.getLastKnownGlobalCheckpoint()));
delegatedListener.onResponse(
new ReplicaResponse(replica.getLocalCheckpoint(), replica.getLastKnownGlobalCheckpoint())
);
} catch (final Exception e) {
Releasables.closeWhileHandlingException(releasable);
delegatedListener.onFailure(e);
}
}),
ThreadPool.Names.WRITE, request);
ThreadPool.Names.WRITE,
request
);
}
@Override
public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception,
ActionListener<Void> listener) {
public void failShardIfNeeded(
ShardRouting replica,
long primaryTerm,
String message,
Exception exception,
ActionListener<Void> listener
) {
throw new UnsupportedOperationException("failing shard " + replica + " isn't supported. failure: " + message, exception);
}
@Override
public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm,
ActionListener<Void> listener) {
throw new UnsupportedOperationException("can't mark " + shardId + ", aid [" + allocationId + "] as stale");
public void markShardCopyAsStaleIfNeeded(
ShardId shardId,
String allocationId,
long primaryTerm,
ActionListener<Void> listener
) {
throw new UnsupportedOperationException("can't mark " + shardId + ", aid [" + allocationId + "] as stale");
}
}
@ -786,19 +881,30 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
@Override
protected void performOnPrimary(IndexShard primary, BulkShardRequest request, ActionListener<PrimaryResult> listener) {
executeShardBulkOnPrimary(primary, request,
ActionListener.map(listener, result -> new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful)));
executeShardBulkOnPrimary(
primary,
request,
ActionListener.map(listener, result -> new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful))
);
}
@Override
protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
executeShardBulkOnReplica(request, replica, getPrimaryShard().getPendingPrimaryTerm(),
getPrimaryShard().getLastKnownGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes());
executeShardBulkOnReplica(
request,
replica,
getPrimaryShard().getPendingPrimaryTerm(),
getPrimaryShard().getLastKnownGlobalCheckpoint(),
getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()
);
}
}
private void executeShardBulkOnPrimary(IndexShard primary, BulkShardRequest request,
ActionListener<TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>> listener) {
private void executeShardBulkOnPrimary(
IndexShard primary,
BulkShardRequest request,
ActionListener<TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>> listener
) {
for (BulkItemRequest itemRequest : request.items()) {
if (itemRequest.request() instanceof IndexRequest) {
((IndexRequest) itemRequest.request()).process(Version.CURRENT, null, index.getName());
@ -808,32 +914,64 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
primary.acquirePrimaryOperationPermit(permitAcquiredFuture, ThreadPool.Names.SAME, request);
try (Releasable ignored = permitAcquiredFuture.actionGet()) {
MappingUpdatePerformer noopMappingUpdater = (update, shardId, type, listener1) -> {};
TransportShardBulkAction.performOnPrimary(request, primary, null, System::currentTimeMillis, noopMappingUpdater,
null, ActionTestUtils.assertNoFailureListener(result -> {
TransportWriteActionTestHelper.performPostWriteActions(primary, request,
((TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>) result).location, logger);
TransportShardBulkAction.performOnPrimary(
request,
primary,
null,
System::currentTimeMillis,
noopMappingUpdater,
null,
ActionTestUtils.assertNoFailureListener(result -> {
TransportWriteActionTestHelper.performPostWriteActions(
primary,
request,
((TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>) result).location,
logger
);
listener.onResponse((TransportWriteAction.WritePrimaryResult<BulkShardRequest, BulkShardResponse>) result);
}), threadPool, Names.WRITE);
}),
threadPool,
Names.WRITE
);
} catch (Exception e) {
listener.onFailure(e);
}
}
private <Request extends ReplicatedWriteRequest & DocWriteRequest> BulkShardRequest executeReplicationRequestOnPrimary(
IndexShard primary, Request request) throws Exception {
final BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, request.getRefreshPolicy(),
new BulkItemRequest[]{new BulkItemRequest(0, request)});
IndexShard primary,
Request request
) throws Exception {
final BulkShardRequest bulkShardRequest = new BulkShardRequest(
shardId,
request.getRefreshPolicy(),
new BulkItemRequest[] { new BulkItemRequest(0, request) }
);
final PlainActionFuture<BulkShardRequest> res = new PlainActionFuture<>();
executeShardBulkOnPrimary(
primary, bulkShardRequest, ActionListener.map(res, TransportReplicationAction.PrimaryResult::replicaRequest));
primary,
bulkShardRequest,
ActionListener.map(res, TransportReplicationAction.PrimaryResult::replicaRequest)
);
return res.get();
}
private void executeShardBulkOnReplica(BulkShardRequest request, IndexShard replica, long operationPrimaryTerm,
long globalCheckpointOnPrimary, long maxSeqNoOfUpdatesOrDeletes) throws Exception {
private void executeShardBulkOnReplica(
BulkShardRequest request,
IndexShard replica,
long operationPrimaryTerm,
long globalCheckpointOnPrimary,
long maxSeqNoOfUpdatesOrDeletes
) throws Exception {
final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
replica.acquireReplicaOperationPermit(operationPrimaryTerm, globalCheckpointOnPrimary,
maxSeqNoOfUpdatesOrDeletes, permitAcquiredFuture, ThreadPool.Names.SAME, request);
replica.acquireReplicaOperationPermit(
operationPrimaryTerm,
globalCheckpointOnPrimary,
maxSeqNoOfUpdatesOrDeletes,
permitAcquiredFuture,
ThreadPool.Names.SAME,
request
);
final Translog.Location location;
try (Releasable ignored = permitAcquiredFuture.actionGet()) {
location = TransportShardBulkAction.performOnReplica(request, replica);
@ -863,34 +1001,48 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica, long term) throws Exception {
executeShardBulkOnReplica(request, replica, term,
group.primary.getLastKnownGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes());
executeShardBulkOnReplica(
request,
replica,
term,
group.primary.getLastKnownGlobalCheckpoint(),
group.primary.getMaxSeqNoOfUpdatesOrDeletes()
);
}
/**
* Executes the delete request on the given replica shard.
*/
void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception {
executeShardBulkOnReplica(request, replica, group.primary.getPendingPrimaryTerm(),
group.primary.getLastKnownGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes());
executeShardBulkOnReplica(
request,
replica,
group.primary.getPendingPrimaryTerm(),
group.primary.getLastKnownGlobalCheckpoint(),
group.primary.getMaxSeqNoOfUpdatesOrDeletes()
);
}
class GlobalCheckpointSync extends ReplicationAction<
GlobalCheckpointSyncAction.Request,
GlobalCheckpointSyncAction.Request,
ReplicationResponse> {
GlobalCheckpointSyncAction.Request,
GlobalCheckpointSyncAction.Request,
ReplicationResponse> {
GlobalCheckpointSync(final ActionListener<ReplicationResponse> listener, final ReplicationGroup replicationGroup) {
super(
new GlobalCheckpointSyncAction.Request(replicationGroup.getPrimary().shardId()),
listener,
replicationGroup,
"global_checkpoint_sync");
new GlobalCheckpointSyncAction.Request(replicationGroup.getPrimary().shardId()),
listener,
replicationGroup,
"global_checkpoint_sync"
);
}
@Override
protected void performOnPrimary(IndexShard primary, GlobalCheckpointSyncAction.Request request,
ActionListener<PrimaryResult> listener) {
protected void performOnPrimary(
IndexShard primary,
GlobalCheckpointSyncAction.Request request,
ActionListener<PrimaryResult> listener
) {
ActionListener.completeWith(listener, () -> {
primary.sync();
return new PrimaryResult(request, new ReplicationResponse());
@ -920,26 +1072,50 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
@Override
protected void performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception {
executeResyncOnReplica(replica, request, getPrimaryShard().getPendingPrimaryTerm(),
getPrimaryShard().getLastKnownGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes());
executeResyncOnReplica(
replica,
request,
getPrimaryShard().getPendingPrimaryTerm(),
getPrimaryShard().getLastKnownGlobalCheckpoint(),
getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()
);
}
}
private TransportWriteAction.WritePrimaryResult<ResyncReplicationRequest, ResyncReplicationResponse> executeResyncOnPrimary(
IndexShard primary, ResyncReplicationRequest request) {
IndexShard primary,
ResyncReplicationRequest request
) {
final TransportWriteAction.WritePrimaryResult<ResyncReplicationRequest, ResyncReplicationResponse> result =
new TransportWriteAction.WritePrimaryResult<>(TransportResyncReplicationAction.performOnPrimary(request),
new ResyncReplicationResponse(), null, null, primary, logger);
new TransportWriteAction.WritePrimaryResult<>(
TransportResyncReplicationAction.performOnPrimary(request),
new ResyncReplicationResponse(),
null,
null,
primary,
logger
);
TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.location, logger);
return result;
}
private void executeResyncOnReplica(IndexShard replica, ResyncReplicationRequest request, long operationPrimaryTerm,
long globalCheckpointOnPrimary, long maxSeqNoOfUpdatesOrDeletes) throws Exception {
private void executeResyncOnReplica(
IndexShard replica,
ResyncReplicationRequest request,
long operationPrimaryTerm,
long globalCheckpointOnPrimary,
long maxSeqNoOfUpdatesOrDeletes
) throws Exception {
final Translog.Location location;
final PlainActionFuture<Releasable> acquirePermitFuture = new PlainActionFuture<>();
replica.acquireReplicaOperationPermit(operationPrimaryTerm, globalCheckpointOnPrimary,
maxSeqNoOfUpdatesOrDeletes, acquirePermitFuture, ThreadPool.Names.SAME, request);
replica.acquireReplicaOperationPermit(
operationPrimaryTerm,
globalCheckpointOnPrimary,
maxSeqNoOfUpdatesOrDeletes,
acquirePermitFuture,
ThreadPool.Names.SAME,
request
);
try (Releasable ignored = acquirePermitFuture.actionGet()) {
location = TransportResyncReplicationAction.performOnReplica(request, replica);
}
@ -947,16 +1123,24 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
}
class SyncRetentionLeases extends ReplicationAction<
RetentionLeaseSyncAction.Request, RetentionLeaseSyncAction.Request, RetentionLeaseSyncAction.Response> {
RetentionLeaseSyncAction.Request,
RetentionLeaseSyncAction.Request,
RetentionLeaseSyncAction.Response> {
SyncRetentionLeases(RetentionLeaseSyncAction.Request request, ReplicationGroup group,
ActionListener<RetentionLeaseSyncAction.Response> listener) {
SyncRetentionLeases(
RetentionLeaseSyncAction.Request request,
ReplicationGroup group,
ActionListener<RetentionLeaseSyncAction.Response> listener
) {
super(request, listener, group, "sync-retention-leases");
}
@Override
protected void performOnPrimary(IndexShard primary, RetentionLeaseSyncAction.Request request,
ActionListener<PrimaryResult> listener) {
protected void performOnPrimary(
IndexShard primary,
RetentionLeaseSyncAction.Request request,
ActionListener<PrimaryResult> listener
) {
ActionListener.completeWith(listener, () -> {
primary.persistRetentionLeases();
return new PrimaryResult(request, new RetentionLeaseSyncAction.Response());
@ -50,12 +50,16 @@ public class RetentionLeaseUtils {
* @return the map from retention lease ID to retention lease
*/
public static Map<String, RetentionLease> toMapExcludingPeerRecoveryRetentionLeases(final RetentionLeases retentionLeases) {
return retentionLeases.leases().stream()
return retentionLeases.leases()
.stream()
.filter(l -> ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals(l.source()) == false)
.collect(Collectors.toMap(RetentionLease::id, Function.identity(),
(o1, o2) -> {
throw new AssertionError("unexpectedly merging " + o1 + " and " + o2);
},
LinkedHashMap::new));
.collect(
Collectors.toMap(
RetentionLease::id,
Function.identity(),
(o1, o2) -> { throw new AssertionError("unexpectedly merging " + o1 + " and " + o2); },
LinkedHashMap::new
)
);
}
}
@ -125,7 +125,8 @@ import static org.hamcrest.Matchers.hasSize;
*/
public abstract class IndexShardTestCase extends OpenSearchTestCase {
public static final IndexEventListener EMPTY_EVENT_LISTENER = new IndexEventListener() {};
public static final IndexEventListener EMPTY_EVENT_LISTENER = new IndexEventListener() {
};
private static final AtomicBoolean failOnShardFailures = new AtomicBoolean(true);
@ -229,11 +230,16 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param engineFactory the engine factory to use for this shard
*/
protected IndexShard newShard(boolean primary, Settings settings, EngineFactory engineFactory) throws IOException {
final RecoverySource recoverySource =
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE;
final ShardRouting shardRouting =
TestShardRouting.newShardRouting(
new ShardId("index", "_na_", 0), randomAlphaOfLength(10), primary, ShardRoutingState.INITIALIZING, recoverySource);
final RecoverySource recoverySource = primary
? RecoverySource.EmptyStoreRecoverySource.INSTANCE
: RecoverySource.PeerRecoverySource.INSTANCE;
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
new ShardId("index", "_na_", 0),
randomAlphaOfLength(10),
primary,
ShardRoutingState.INITIALIZING,
recoverySource
);
return newShard(shardRouting, settings, engineFactory);
}
@ -242,7 +248,7 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
}
protected IndexShard newShard(ShardRouting shardRouting, final Settings settings, final IndexingOperationListener... listeners)
throws IOException {
throws IOException {
return newShard(shardRouting, settings, new InternalEngineFactory(), listeners);
}
@ -255,19 +261,23 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(
final ShardRouting shardRouting,
final Settings settings,
final EngineFactory engineFactory,
final IndexingOperationListener... listeners) throws IOException {
final ShardRouting shardRouting,
final Settings settings,
final EngineFactory engineFactory,
final IndexingOperationListener... listeners
) throws IOException {
assert shardRouting.initializing() : shardRouting;
Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean())
.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000))
.put(settings)
.build();
Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean())
.put(
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)
)
.put(settings)
.build();
IndexMetadata.Builder metadata = IndexMetadata.builder(shardRouting.getIndexName())
.settings(indexSettings)
.primaryTerm(0, primaryTerm)
@ -284,9 +294,13 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperationListener... listeners) throws IOException {
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(5), primary,
ShardRouting shardRouting = TestShardRouting.newShardRouting(
shardId,
randomAlphaOfLength(5),
primary,
ShardRoutingState.INITIALIZING,
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
);
return newShard(shardRouting, Settings.EMPTY, new InternalEngineFactory(), listeners);
}
@ -298,9 +312,13 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
* (ready to recover from another shard)
*/
protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper)
throws IOException {
protected IndexShard newShard(
ShardId shardId,
boolean primary,
String nodeId,
IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper
) throws IOException {
return newShard(shardId, primary, nodeId, indexMetadata, readerWrapper, () -> {});
}
@ -312,13 +330,29 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
* (ready to recover from another shard)
*/
protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper,
Runnable globalCheckpointSyncer) throws IOException {
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING,
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
protected IndexShard newShard(
ShardId shardId,
boolean primary,
String nodeId,
IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper,
Runnable globalCheckpointSyncer
) throws IOException {
ShardRouting shardRouting = TestShardRouting.newShardRouting(
shardId,
nodeId,
primary,
ShardRoutingState.INITIALIZING,
primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
);
return newShard(
shardRouting, indexMetadata, readerWrapper, new InternalEngineFactory(), globalCheckpointSyncer, RetentionLeaseSyncer.EMPTY);
shardRouting,
indexMetadata,
readerWrapper,
new InternalEngineFactory(),
globalCheckpointSyncer,
RetentionLeaseSyncer.EMPTY
);
}
/**
@ -329,10 +363,13 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param indexMetadata indexMetadata for the shard, including any mapping
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
EngineFactory engineFactory, IndexingOperationListener... listeners)
throws IOException {
protected IndexShard newShard(
ShardRouting routing,
IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
EngineFactory engineFactory,
IndexingOperationListener... listeners
) throws IOException {
return newShard(routing, indexMetadata, indexReaderWrapper, engineFactory, () -> {}, RetentionLeaseSyncer.EMPTY, listeners);
}
@ -345,18 +382,32 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param globalCheckpointSyncer callback for syncing global checkpoints
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
@Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer,
IndexingOperationListener... listeners)
throws IOException {
protected IndexShard newShard(
ShardRouting routing,
IndexMetadata indexMetadata,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
@Nullable EngineFactory engineFactory,
Runnable globalCheckpointSyncer,
RetentionLeaseSyncer retentionLeaseSyncer,
IndexingOperationListener... listeners
) throws IOException {
// add node id as name to settings for proper logging
final ShardId shardId = routing.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
return newShard(routing, shardPath, indexMetadata, null, indexReaderWrapper, engineFactory,
new EngineConfigFactory(new IndexSettings(indexMetadata, indexMetadata.getSettings())), globalCheckpointSyncer,
retentionLeaseSyncer, EMPTY_EVENT_LISTENER, listeners);
return newShard(
routing,
shardPath,
indexMetadata,
null,
indexReaderWrapper,
engineFactory,
new EngineConfigFactory(new IndexSettings(indexMetadata, indexMetadata.getSettings())),
globalCheckpointSyncer,
retentionLeaseSyncer,
EMPTY_EVENT_LISTENER,
listeners
);
}
/**
@ -370,12 +421,19 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param indexEventListener index event listener
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetadata indexMetadata,
@Nullable CheckedFunction<IndexSettings, Store, IOException> storeProvider,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
@Nullable EngineFactory engineFactory, @Nullable EngineConfigFactory engineConfigFactory,
Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer,
IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException {
protected IndexShard newShard(
ShardRouting routing,
ShardPath shardPath,
IndexMetadata indexMetadata,
@Nullable CheckedFunction<IndexSettings, Store, IOException> storeProvider,
@Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
@Nullable EngineFactory engineFactory,
@Nullable EngineConfigFactory engineConfigFactory,
Runnable globalCheckpointSyncer,
RetentionLeaseSyncer retentionLeaseSyncer,
IndexEventListener indexEventListener,
IndexingOperationListener... listeners
) throws IOException {
final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings);
final IndexShard indexShard;
@ -386,36 +444,43 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
boolean success = false;
try {
IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(),
indexSettings.getSettings(), "index");
MapperService mapperService = MapperTestUtils.newMapperService(
xContentRegistry(),
createTempDir(),
indexSettings.getSettings(),
"index"
);
mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY);
SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap());
final Engine.Warmer warmer = createTestWarmer(indexSettings);
ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
CircuitBreakerService breakerService = new HierarchyCircuitBreakerService(nodeSettings,
CircuitBreakerService breakerService = new HierarchyCircuitBreakerService(
nodeSettings,
Collections.emptyList(),
clusterSettings);
clusterSettings
);
indexShard = new IndexShard(
routing,
indexSettings,
shardPath,
store,
() -> null,
indexCache,
mapperService,
similarityService,
engineFactory,
engineConfigFactory,
indexEventListener,
indexReaderWrapper,
threadPool,
BigArrays.NON_RECYCLING_INSTANCE,
warmer,
Collections.emptyList(),
Arrays.asList(listeners),
globalCheckpointSyncer,
retentionLeaseSyncer,
breakerService);
routing,
indexSettings,
shardPath,
store,
() -> null,
indexCache,
mapperService,
similarityService,
engineFactory,
engineConfigFactory,
indexEventListener,
indexReaderWrapper,
threadPool,
BigArrays.NON_RECYCLING_INSTANCE,
warmer,
Collections.emptyList(),
Arrays.asList(listeners),
globalCheckpointSyncer,
retentionLeaseSyncer,
breakerService
);
indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER);
success = true;
} finally {
@ -433,9 +498,14 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
*/
protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException {
final ShardRouting shardRouting = current.routingEntry();
return reinitShard(current, ShardRoutingHelper.initWithSameId(shardRouting,
shardRouting.primary() ? RecoverySource.ExistingStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
), listeners);
return reinitShard(
current,
ShardRoutingHelper.initWithSameId(
shardRouting,
shardRouting.primary() ? RecoverySource.ExistingStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
),
listeners
);
}
/**
@ -445,8 +515,14 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param listeners new listeners to use for the newly created shard
*/
protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException {
return reinitShard(current, routing, current.indexSettings.getIndexMetadata(), current.engineFactory,
current.engineConfigFactory, listeners);
return reinitShard(
current,
routing,
current.indexSettings.getIndexMetadata(),
current.engineFactory,
current.engineConfigFactory,
listeners
);
}
/**
@ -457,20 +533,28 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param indexMetadata the index metadata to use for the newly created shard
* @param engineFactory the engine factory for the new shard
*/
protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexMetadata indexMetadata, EngineFactory engineFactory,
EngineConfigFactory engineConfigFactory, IndexingOperationListener... listeners) throws IOException {
protected IndexShard reinitShard(
IndexShard current,
ShardRouting routing,
IndexMetadata indexMetadata,
EngineFactory engineFactory,
EngineConfigFactory engineConfigFactory,
IndexingOperationListener... listeners
) throws IOException {
closeShards(current);
return newShard(
routing,
current.shardPath(),
indexMetadata,
null,
null,
engineFactory,
engineConfigFactory,
current.getGlobalCheckpointSyncer(),
current.getRetentionLeaseSyncer(),
EMPTY_EVENT_LISTENER, listeners);
routing,
current.shardPath(),
indexMetadata,
null,
null,
engineFactory,
engineConfigFactory,
current.getGlobalCheckpointSyncer(),
current.getRetentionLeaseSyncer(),
EMPTY_EVENT_LISTENER,
listeners
);
}
/**
@ -514,8 +598,8 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param settings the settings to use for this shard
* @param engineFactory the engine factory to use for this shard
*/
protected IndexShard newStartedShard(
final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException {
protected IndexShard newStartedShard(final boolean primary, final Settings settings, final EngineFactory engineFactory)
throws IOException {
return newStartedShard(p -> newShard(p, settings, engineFactory), primary);
}
@ -525,8 +609,8 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param shardFunction shard factory function
* @param primary controls whether the shard will be a primary or a replica.
*/
protected IndexShard newStartedShard(CheckedFunction<Boolean, IndexShard, IOException> shardFunction,
boolean primary) throws IOException {
protected IndexShard newStartedShard(CheckedFunction<Boolean, IndexShard, IOException> shardFunction, boolean primary)
throws IOException {
IndexShard shard = shardFunction.apply(primary);
if (primary) {
recoverShardFromStore(shard);
@ -564,9 +648,10 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
}
protected void recoverShardFromStore(IndexShard primary) throws IOException {
primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(),
getFakeDiscoNode(primary.routingEntry().currentNodeId()),
null));
primary.markAsRecovering(
"store",
new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null)
);
recoverFromStore(primary);
updateRoutingEntry(primary, ShardRoutingHelper.moveToStarted(primary.routingEntry()));
}
@ -574,13 +659,16 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
protected static AtomicLong currentClusterStateVersion = new AtomicLong();
public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRouting) throws IOException {
Set<String> inSyncIds =
shardRouting.active() ? Collections.singleton(shardRouting.allocationId().getId()) : Collections.emptySet();
IndexShardRoutingTable newRoutingTable = new IndexShardRoutingTable.Builder(shardRouting.shardId())
.addShard(shardRouting)
.build();
shard.updateShardState(shardRouting, shard.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
inSyncIds, newRoutingTable);
Set<String> inSyncIds = shardRouting.active() ? Collections.singleton(shardRouting.allocationId().getId()) : Collections.emptySet();
IndexShardRoutingTable newRoutingTable = new IndexShardRoutingTable.Builder(shardRouting.shardId()).addShard(shardRouting).build();
shard.updateShardState(
shardRouting,
shard.getPendingPrimaryTerm(),
null,
currentClusterStateVersion.incrementAndGet(),
inSyncIds,
newRoutingTable
);
}
protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException {
@ -594,22 +682,29 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
}
protected DiscoveryNode getFakeDiscoNode(String id) {
return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), DiscoveryNodeRole.BUILT_IN_ROLES,
Version.CURRENT);
return new DiscoveryNode(
id,
id,
buildNewFakeTransportAddress(),
Collections.emptyMap(),
DiscoveryNodeRole.BUILT_IN_ROLES,
Version.CURRENT
);
}
/** recovers a replica from the given primary **/
protected void recoverReplica(IndexShard replica, IndexShard primary, boolean startReplica) throws IOException {
recoverReplica(replica, primary,
(r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener),
true, startReplica);
recoverReplica(replica, primary, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener), true, startReplica);
}
/** recovers a replica from the given primary **/
protected void recoverReplica(final IndexShard replica,
final IndexShard primary,
final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
final boolean markAsRecovering, final boolean markAsStarted) throws IOException {
protected void recoverReplica(
final IndexShard replica,
final IndexShard primary,
final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
final boolean markAsRecovering,
final boolean markAsStarted
) throws IOException {
IndexShardRoutingTable.Builder newRoutingTable = new IndexShardRoutingTable.Builder(replica.shardId());
newRoutingTable.addShard(primary.routingEntry());
if (replica.routingEntry().isRelocationTarget() == false) {
@ -634,12 +729,14 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
* @param targetSupplier supplies an instance of {@link RecoveryTarget}
* @param markAsRecovering set to {@code false} if the replica is marked as recovering
*/
protected final void recoverUnstartedReplica(final IndexShard replica,
final IndexShard primary,
final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
final boolean markAsRecovering,
final Set<String> inSyncIds,
final IndexShardRoutingTable routingTable) throws IOException {
protected final void recoverUnstartedReplica(
final IndexShard replica,
final IndexShard primary,
final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
final boolean markAsRecovering,
final Set<String> inSyncIds,
final IndexShardRoutingTable routingTable
) throws IOException {
final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId());
final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId());
if (markAsRecovering) {
@ -651,14 +748,31 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
final RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint();
final StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(
logger, rNode, recoveryTarget, startingSeqNo);
logger,
rNode,
recoveryTarget,
startingSeqNo
);
int fileChunkSizeInBytes = Math.toIntExact(
randomBoolean() ? RecoverySettings.DEFAULT_CHUNK_SIZE.getBytes() : randomIntBetween(1, 10 * 1024 * 1024));
final RecoverySourceHandler recovery = new RecoverySourceHandler(primary,
new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), threadPool,
request, fileChunkSizeInBytes, between(1, 8), between(1, 8));
primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable);
randomBoolean() ? RecoverySettings.DEFAULT_CHUNK_SIZE.getBytes() : randomIntBetween(1, 10 * 1024 * 1024)
);
final RecoverySourceHandler recovery = new RecoverySourceHandler(
primary,
new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()),
threadPool,
request,
fileChunkSizeInBytes,
between(1, 8),
between(1, 8)
);
primary.updateShardState(
primary.routingEntry(),
primary.getPendingPrimaryTerm(),
null,
currentClusterStateVersion.incrementAndGet(),
inSyncIds,
routingTable
);
try {
PlainActionFuture<RecoveryResponse> future = new PlainActionFuture<>();
recovery.recoverToTarget(future);
@ -670,29 +784,39 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
}
}
protected void startReplicaAfterRecovery(IndexShard replica, IndexShard primary, Set<String> inSyncIds,
IndexShardRoutingTable routingTable) throws IOException {
protected void startReplicaAfterRecovery(
IndexShard replica,
IndexShard primary,
Set<String> inSyncIds,
IndexShardRoutingTable routingTable
) throws IOException {
ShardRouting initializingReplicaRouting = replica.routingEntry();
IndexShardRoutingTable newRoutingTable =
initializingReplicaRouting.isRelocationTarget() ?
new IndexShardRoutingTable.Builder(routingTable)
.removeShard(primary.routingEntry())
.addShard(replica.routingEntry())
.build() :
new IndexShardRoutingTable.Builder(routingTable)
.removeShard(initializingReplicaRouting)
IndexShardRoutingTable newRoutingTable = initializingReplicaRouting.isRelocationTarget()
? new IndexShardRoutingTable.Builder(routingTable).removeShard(primary.routingEntry()).addShard(replica.routingEntry()).build()
: new IndexShardRoutingTable.Builder(routingTable).removeShard(initializingReplicaRouting)
.addShard(replica.routingEntry())
.build();
Set<String> inSyncIdsWithReplica = new HashSet<>(inSyncIds);
inSyncIdsWithReplica.add(replica.routingEntry().allocationId().getId());
// update both primary and replica shard state
primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable);
replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPendingPrimaryTerm(), null,
currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable);
primary.updateShardState(
primary.routingEntry(),
primary.getPendingPrimaryTerm(),
null,
currentClusterStateVersion.incrementAndGet(),
inSyncIdsWithReplica,
newRoutingTable
);
replica.updateShardState(
replica.routingEntry().moveToStarted(),
replica.getPendingPrimaryTerm(),
null,
currentClusterStateVersion.get(),
inSyncIdsWithReplica,
newRoutingTable
);
}
/**
* promotes a replica to primary, incrementing its term and starting it if needed
*/
@ -704,17 +828,22 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
null,
true,
ShardRoutingState.STARTED,
replica.routingEntry().allocationId());
replica.routingEntry().allocationId()
);
final IndexShardRoutingTable newRoutingTable = new IndexShardRoutingTable.Builder(routingTable)
.removeShard(replica.routingEntry())
final IndexShardRoutingTable newRoutingTable = new IndexShardRoutingTable.Builder(routingTable).removeShard(replica.routingEntry())
.addShard(routingEntry)
.build();
replica.updateShardState(routingEntry, replica.getPendingPrimaryTerm() + 1,
(is, listener) ->
listener.onResponse(new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())),
replica.updateShardState(
routingEntry,
replica.getPendingPrimaryTerm() + 1,
(is, listener) -> listener.onResponse(
new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())
),
currentClusterStateVersion.incrementAndGet(),
inSyncIds, newRoutingTable);
inSyncIds,
newRoutingTable
);
}
public static Set<String> getShardDocUIDs(final IndexShard shard) throws IOException {
@ -753,33 +882,69 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
return indexDoc(shard, type, id, source, XContentType.JSON, null);
}
protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType,
String routing)
throws IOException {
protected Engine.IndexResult indexDoc(
IndexShard shard,
String type,
String id,
String source,
XContentType xContentType,
String routing
) throws IOException {
SourceToParse sourceToParse = new SourceToParse(
shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType, routing);
shard.shardId().getIndexName(),
type,
id,
new BytesArray(source),
xContentType,
routing
);
Engine.IndexResult result;
if (shard.routingEntry().primary()) {
result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse,
SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
result = shard.applyIndexOperationOnPrimary(
Versions.MATCH_ANY,
VersionType.INTERNAL,
sourceToParse,
SequenceNumbers.UNASSIGNED_SEQ_NO,
0,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
false
);
if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
updateMappings(shard, IndexMetadata.builder(shard.indexSettings().getIndexMetadata())
.putMapping(type, result.getRequiredMappingUpdate().toString()).build());
result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse,
SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
updateMappings(
shard,
IndexMetadata.builder(shard.indexSettings().getIndexMetadata())
.putMapping(type, result.getRequiredMappingUpdate().toString())
.build()
);
result = shard.applyIndexOperationOnPrimary(
Versions.MATCH_ANY,
VersionType.INTERNAL,
sourceToParse,
SequenceNumbers.UNASSIGNED_SEQ_NO,
0,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
false
);
}
shard.sync(); // advance local checkpoint
shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(),
shard.getLocalCheckpoint());
shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getLocalCheckpoint());
} else {
final long seqNo = shard.seqNoStats().getMaxSeqNo() + 1;
shard.advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); // manually replicate max_seq_no_of_updates
result = shard.applyIndexOperationOnReplica(seqNo, shard.getOperationPrimaryTerm(), 0,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
result = shard.applyIndexOperationOnReplica(
seqNo,
shard.getOperationPrimaryTerm(),
0,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
false,
sourceToParse
);
shard.sync(); // advance local checkpoint
if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
throw new TransportReplicationAction.RetryOnReplicaException(shard.shardId,
"Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate());
throw new TransportReplicationAction.RetryOnReplicaException(
shard.shardId,
"Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate()
);
}
}
return result;
@ -787,18 +952,25 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
protected void updateMappings(IndexShard shard, IndexMetadata indexMetadata) {
shard.mapperService().merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE);
shard.indexSettings().updateIndexMetadata(
IndexMetadata.builder(indexMetadata).putMapping(new MappingMetadata(shard.mapperService().documentMapper())).build());
shard.indexSettings()
.updateIndexMetadata(
IndexMetadata.builder(indexMetadata).putMapping(new MappingMetadata(shard.mapperService().documentMapper())).build()
);
}
protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException {
final Engine.DeleteResult result;
if (shard.routingEntry().primary()) {
result = shard.applyDeleteOperationOnPrimary(
Versions.MATCH_ANY, type, id, VersionType.INTERNAL, SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
Versions.MATCH_ANY,
type,
id,
VersionType.INTERNAL,
SequenceNumbers.UNASSIGNED_SEQ_NO,
0
);
shard.sync(); // advance local checkpoint
shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(),
shard.getLocalCheckpoint());
shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getLocalCheckpoint());
} else {
final long seqNo = shard.seqNoStats().getMaxSeqNo() + 1;
shard.advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); // manually replicate max_seq_no_of_updates
@ -823,25 +995,27 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
}
/** Recover a shard from a snapshot using a given repository **/
protected void recoverShardFromSnapshot(final IndexShard shard,
final Snapshot snapshot,
final Repository repository) {
protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot snapshot, final Repository repository) {
final Version version = Version.CURRENT;
final ShardId shardId = shard.shardId();
final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID());
final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId());
final RecoverySource.SnapshotRecoverySource recoverySource =
new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, version, indexId);
final ShardRouting shardRouting =
TestShardRouting.newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource);
final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(
UUIDs.randomBase64UUID(),
snapshot,
version,
indexId
);
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
shardId,
node.getId(),
true,
ShardRoutingState.INITIALIZING,
recoverySource
);
shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null));
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
repository.restoreShard(shard.store(),
snapshot.getSnapshotId(),
indexId,
shard.shardId(),
shard.recoveryState(),
future);
repository.restoreShard(shard.store(), snapshot.getSnapshotId(), indexId, shard.shardId(), shard.recoveryState(), future);
future.actionGet();
}
@ -850,20 +1024,29 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase {
*
* @return new shard generation
*/
protected String snapshotShard(final IndexShard shard,
final Snapshot snapshot,
final Repository repository) throws IOException {
protected String snapshotShard(final IndexShard shard, final Snapshot snapshot, final Repository repository) throws IOException {
final Index index = shard.shardId().getIndex();
final IndexId indexId = new IndexId(index.getName(), index.getUUID());
final IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(
OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository).shardGenerations().getShardGen(
indexId, shard.shardId().getId()));
OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository)
.shardGenerations()
.getShardGen(indexId, shard.shardId().getId())
);
final PlainActionFuture<String> future = PlainActionFuture.newFuture();
final String shardGen;
try (Engine.IndexCommitRef indexCommitRef = shard.acquireLastIndexCommit(true)) {
repository.snapshotShard(shard.store(), shard.mapperService(), snapshot.getSnapshotId(), indexId,
indexCommitRef.getIndexCommit(), null, snapshotStatus, Version.CURRENT,
Collections.emptyMap(), future);
repository.snapshotShard(
shard.store(),
shard.mapperService(),
snapshot.getSnapshotId(),
indexId,
indexCommitRef.getIndexCommit(),
null,
snapshotStatus,
Version.CURRENT,
Collections.emptyMap(),
future
);
shardGen = future.actionGet();
}
@ -72,16 +72,13 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i
}
@Override
protected void doStart() {
}
protected void doStart() {}
@Override
protected void doStop() {
}
protected void doStop() {}
@Override
protected void doClose() {
}
protected void doClose() {}
@Override
public RepositoryMetadata getMetadata() {
@ -106,25 +103,42 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i
@Override
public void getRepositoryData(ActionListener<RepositoryData> listener) {
final IndexId indexId = new IndexId(indexName, "blah");
listener.onResponse(new RepositoryData(EMPTY_REPO_GEN, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(),
Collections.singletonMap(indexId, emptyList()), ShardGenerations.EMPTY, IndexMetaDataGenerations.EMPTY));
listener.onResponse(
new RepositoryData(
EMPTY_REPO_GEN,
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap(),
Collections.singletonMap(indexId, emptyList()),
ShardGenerations.EMPTY,
IndexMetaDataGenerations.EMPTY
)
);
}
@Override
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata metadata) {
}
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, Metadata metadata) {}
@Override
public void finalizeSnapshot(ShardGenerations shardGenerations, long repositoryStateId,
Metadata clusterMetadata, SnapshotInfo snapshotInfo, Version repositoryMetaVersion,
Function<ClusterState, ClusterState> stateTransformer,
ActionListener<RepositoryData> listener) {
public void finalizeSnapshot(
ShardGenerations shardGenerations,
long repositoryStateId,
Metadata clusterMetadata,
SnapshotInfo snapshotInfo,
Version repositoryMetaVersion,
Function<ClusterState, ClusterState> stateTransformer,
ActionListener<RepositoryData> listener
) {
listener.onResponse(null);
}
@Override
public void deleteSnapshots(Collection<SnapshotId> snapshotIds, long repositoryStateId, Version repositoryMetaVersion,
ActionListener<RepositoryData> listener) {
public void deleteSnapshots(
Collection<SnapshotId> snapshotIds,
long repositoryStateId,
Version repositoryMetaVersion,
ActionListener<RepositoryData> listener
) {
listener.onResponse(null);
}
@ -144,8 +158,7 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i
}
@Override
public void endVerification(String verificationToken) {
}
public void endVerification(String verificationToken) {}
@Override
public boolean isReadOnly() {
@ -153,10 +166,18 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i
}
@Override
public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
IndexCommit snapshotIndexCommit, String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus,
Version repositoryMetaVersion, Map<String, Object> userMetadata, ActionListener<String> listener) {
}
public void snapshotShard(
Store store,
MapperService mapperService,
SnapshotId snapshotId,
IndexId indexId,
IndexCommit snapshotIndexCommit,
String shardStateIdentifier,
IndexShardSnapshotStatus snapshotStatus,
Version repositoryMetaVersion,
Map<String, Object> userMetadata,
ActionListener<String> listener
) {}
@Override
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
@ -164,22 +185,28 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i
}
@Override
public void verify(String verificationToken, DiscoveryNode localNode) {
}
public void verify(String verificationToken, DiscoveryNode localNode) {}
@Override
public void updateState(final ClusterState state) {
}
public void updateState(final ClusterState state) {}
@Override
public void executeConsistentStateUpdate(Function<RepositoryData, ClusterStateUpdateTask> createUpdateTask, String source,
Consumer<Exception> onFailure) {
public void executeConsistentStateUpdate(
Function<RepositoryData, ClusterStateUpdateTask> createUpdateTask,
String source,
Consumer<Exception> onFailure
) {
throw new UnsupportedOperationException("Unsupported for restore-only repository");
}
@Override
public void cloneShardSnapshot(SnapshotId source, SnapshotId target, RepositoryShardId repositoryShardId, String shardGeneration,
ActionListener<String> listener) {
public void cloneShardSnapshot(
SnapshotId source,
SnapshotId target,
RepositoryShardId repositoryShardId,
String shardGeneration,
ActionListener<String> listener
) {
throw new UnsupportedOperationException("Unsupported for restore-only repository");
}
}

View File

@ -46,9 +46,7 @@ import org.opensearch.test.junit.listeners.ReproduceInfoPrinter;
/**
* Extends Lucene's BaseDirectoryTestCase with OpenSearch test behavior.
*/
@Listeners({
ReproduceInfoPrinter.class
})
@Listeners({ ReproduceInfoPrinter.class })
@TimeoutSuite(millis = TimeUnits.HOUR)
@LuceneTestCase.SuppressReproduceLine
@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")

View File

@ -39,15 +39,18 @@ public class TranslogDeletionPolicies {
public static TranslogDeletionPolicy createTranslogDeletionPolicy() {
return new TranslogDeletionPolicy(
IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getDefault(Settings.EMPTY).getMillis(),
IndexSettings.INDEX_TRANSLOG_RETENTION_TOTAL_FILES_SETTING.getDefault(Settings.EMPTY)
IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getDefault(Settings.EMPTY).getMillis(),
IndexSettings.INDEX_TRANSLOG_RETENTION_TOTAL_FILES_SETTING.getDefault(Settings.EMPTY)
);
}
public static TranslogDeletionPolicy createTranslogDeletionPolicy(IndexSettings indexSettings) {
return new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(),
indexSettings.getTranslogRetentionAge().getMillis(), indexSettings.getTranslogRetentionTotalFiles());
return new TranslogDeletionPolicy(
indexSettings.getTranslogRetentionSize().getBytes(),
indexSettings.getTranslogRetentionAge().getMillis(),
indexSettings.getTranslogRetentionTotalFiles()
);
}
}
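
As a usage sketch (the indexSettings instance is assumed; the two calls are equivalent for an index that keeps the default retention settings):

// Defaults come from the INDEX_TRANSLOG_RETENTION_* settings above.
TranslogDeletionPolicy fromDefaults = TranslogDeletionPolicies.createTranslogDeletionPolicy();
// The overload reads the same three retention values off a concrete IndexSettings.
TranslogDeletionPolicy fromIndex = TranslogDeletionPolicies.createTranslogDeletionPolicy(indexSettings);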

View File

@ -60,7 +60,7 @@ import static java.util.Collections.emptyMap;
*/
public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase {
private static final Map<String,Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String,Class<?>>()
private static final Map<String, Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String, Class<?>>()
// exposed in ES
.put("classic", MovedToAnalysisCommon.class)
.put("edgengram", MovedToAnalysisCommon.class)
@ -80,126 +80,126 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase {
.put("wikipedia", Void.class)
.immutableMap();
static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String,Class<?>>()
static final Map<String, Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String, Class<?>>()
// exposed in ES
.put("apostrophe", MovedToAnalysisCommon.class)
.put("arabicnormalization", MovedToAnalysisCommon.class)
.put("arabicstem", MovedToAnalysisCommon.class)
.put("asciifolding", MovedToAnalysisCommon.class)
.put("bengalinormalization", MovedToAnalysisCommon.class)
.put("bengalistem", MovedToAnalysisCommon.class)
.put("brazilianstem", MovedToAnalysisCommon.class)
.put("bulgarianstem", MovedToAnalysisCommon.class)
.put("cjkbigram", MovedToAnalysisCommon.class)
.put("cjkwidth", MovedToAnalysisCommon.class)
.put("classic", MovedToAnalysisCommon.class)
.put("commongrams", MovedToAnalysisCommon.class)
.put("commongramsquery", MovedToAnalysisCommon.class)
.put("czechstem", MovedToAnalysisCommon.class)
.put("decimaldigit", MovedToAnalysisCommon.class)
.put("delimitedpayload", MovedToAnalysisCommon.class)
.put("dictionarycompoundword", MovedToAnalysisCommon.class)
.put("edgengram", MovedToAnalysisCommon.class)
.put("elision", MovedToAnalysisCommon.class)
.put("englishminimalstem", MovedToAnalysisCommon.class)
.put("englishpossessive", MovedToAnalysisCommon.class)
.put("finnishlightstem", MovedToAnalysisCommon.class)
.put("fixedshingle", MovedToAnalysisCommon.class)
.put("frenchlightstem", MovedToAnalysisCommon.class)
.put("frenchminimalstem", MovedToAnalysisCommon.class)
.put("galicianminimalstem", MovedToAnalysisCommon.class)
.put("galicianstem", MovedToAnalysisCommon.class)
.put("germanstem", MovedToAnalysisCommon.class)
.put("germanlightstem", MovedToAnalysisCommon.class)
.put("germanminimalstem", MovedToAnalysisCommon.class)
.put("germannormalization", MovedToAnalysisCommon.class)
.put("greeklowercase", MovedToAnalysisCommon.class)
.put("greekstem", MovedToAnalysisCommon.class)
.put("hindinormalization", MovedToAnalysisCommon.class)
.put("hindistem", MovedToAnalysisCommon.class)
.put("hungarianlightstem", MovedToAnalysisCommon.class)
.put("hunspellstem", HunspellTokenFilterFactory.class)
.put("hyphenationcompoundword", MovedToAnalysisCommon.class)
.put("indicnormalization", MovedToAnalysisCommon.class)
.put("irishlowercase", MovedToAnalysisCommon.class)
.put("indonesianstem", MovedToAnalysisCommon.class)
.put("italianlightstem", MovedToAnalysisCommon.class)
.put("keepword", MovedToAnalysisCommon.class)
.put("keywordmarker", MovedToAnalysisCommon.class)
.put("kstem", MovedToAnalysisCommon.class)
.put("latvianstem", MovedToAnalysisCommon.class)
.put("length", MovedToAnalysisCommon.class)
.put("limittokencount", MovedToAnalysisCommon.class)
.put("lowercase", MovedToAnalysisCommon.class)
.put("ngram", MovedToAnalysisCommon.class)
.put("norwegianlightstem", MovedToAnalysisCommon.class)
.put("norwegianminimalstem", MovedToAnalysisCommon.class)
.put("patterncapturegroup", MovedToAnalysisCommon.class)
.put("patternreplace", MovedToAnalysisCommon.class)
.put("persiannormalization", MovedToAnalysisCommon.class)
.put("porterstem", MovedToAnalysisCommon.class)
.put("portuguesestem", MovedToAnalysisCommon.class)
.put("portugueselightstem", MovedToAnalysisCommon.class)
.put("portugueseminimalstem", MovedToAnalysisCommon.class)
.put("reversestring", MovedToAnalysisCommon.class)
.put("russianlightstem", MovedToAnalysisCommon.class)
.put("scandinavianfolding", MovedToAnalysisCommon.class)
.put("apostrophe", MovedToAnalysisCommon.class)
.put("arabicnormalization", MovedToAnalysisCommon.class)
.put("arabicstem", MovedToAnalysisCommon.class)
.put("asciifolding", MovedToAnalysisCommon.class)
.put("bengalinormalization", MovedToAnalysisCommon.class)
.put("bengalistem", MovedToAnalysisCommon.class)
.put("brazilianstem", MovedToAnalysisCommon.class)
.put("bulgarianstem", MovedToAnalysisCommon.class)
.put("cjkbigram", MovedToAnalysisCommon.class)
.put("cjkwidth", MovedToAnalysisCommon.class)
.put("classic", MovedToAnalysisCommon.class)
.put("commongrams", MovedToAnalysisCommon.class)
.put("commongramsquery", MovedToAnalysisCommon.class)
.put("czechstem", MovedToAnalysisCommon.class)
.put("decimaldigit", MovedToAnalysisCommon.class)
.put("delimitedpayload", MovedToAnalysisCommon.class)
.put("dictionarycompoundword", MovedToAnalysisCommon.class)
.put("edgengram", MovedToAnalysisCommon.class)
.put("elision", MovedToAnalysisCommon.class)
.put("englishminimalstem", MovedToAnalysisCommon.class)
.put("englishpossessive", MovedToAnalysisCommon.class)
.put("finnishlightstem", MovedToAnalysisCommon.class)
.put("fixedshingle", MovedToAnalysisCommon.class)
.put("frenchlightstem", MovedToAnalysisCommon.class)
.put("frenchminimalstem", MovedToAnalysisCommon.class)
.put("galicianminimalstem", MovedToAnalysisCommon.class)
.put("galicianstem", MovedToAnalysisCommon.class)
.put("germanstem", MovedToAnalysisCommon.class)
.put("germanlightstem", MovedToAnalysisCommon.class)
.put("germanminimalstem", MovedToAnalysisCommon.class)
.put("germannormalization", MovedToAnalysisCommon.class)
.put("greeklowercase", MovedToAnalysisCommon.class)
.put("greekstem", MovedToAnalysisCommon.class)
.put("hindinormalization", MovedToAnalysisCommon.class)
.put("hindistem", MovedToAnalysisCommon.class)
.put("hungarianlightstem", MovedToAnalysisCommon.class)
.put("hunspellstem", HunspellTokenFilterFactory.class)
.put("hyphenationcompoundword", MovedToAnalysisCommon.class)
.put("indicnormalization", MovedToAnalysisCommon.class)
.put("irishlowercase", MovedToAnalysisCommon.class)
.put("indonesianstem", MovedToAnalysisCommon.class)
.put("italianlightstem", MovedToAnalysisCommon.class)
.put("keepword", MovedToAnalysisCommon.class)
.put("keywordmarker", MovedToAnalysisCommon.class)
.put("kstem", MovedToAnalysisCommon.class)
.put("latvianstem", MovedToAnalysisCommon.class)
.put("length", MovedToAnalysisCommon.class)
.put("limittokencount", MovedToAnalysisCommon.class)
.put("lowercase", MovedToAnalysisCommon.class)
.put("ngram", MovedToAnalysisCommon.class)
.put("norwegianlightstem", MovedToAnalysisCommon.class)
.put("norwegianminimalstem", MovedToAnalysisCommon.class)
.put("patterncapturegroup", MovedToAnalysisCommon.class)
.put("patternreplace", MovedToAnalysisCommon.class)
.put("persiannormalization", MovedToAnalysisCommon.class)
.put("porterstem", MovedToAnalysisCommon.class)
.put("portuguesestem", MovedToAnalysisCommon.class)
.put("portugueselightstem", MovedToAnalysisCommon.class)
.put("portugueseminimalstem", MovedToAnalysisCommon.class)
.put("reversestring", MovedToAnalysisCommon.class)
.put("russianlightstem", MovedToAnalysisCommon.class)
.put("scandinavianfolding", MovedToAnalysisCommon.class)
.put("scandinaviannormalization", MovedToAnalysisCommon.class)
.put("serbiannormalization", MovedToAnalysisCommon.class)
.put("shingle", ShingleTokenFilterFactory.class)
.put("minhash", MovedToAnalysisCommon.class)
.put("snowballporter", MovedToAnalysisCommon.class)
.put("soraninormalization", MovedToAnalysisCommon.class)
.put("soranistem", MovedToAnalysisCommon.class)
.put("spanishlightstem", MovedToAnalysisCommon.class)
.put("stemmeroverride", MovedToAnalysisCommon.class)
.put("stop", StopTokenFilterFactory.class)
.put("swedishlightstem", MovedToAnalysisCommon.class)
.put("synonym", MovedToAnalysisCommon.class)
.put("synonymgraph", MovedToAnalysisCommon.class)
.put("trim", MovedToAnalysisCommon.class)
.put("truncate", MovedToAnalysisCommon.class)
.put("turkishlowercase", MovedToAnalysisCommon.class)
.put("type", MovedToAnalysisCommon.class)
.put("uppercase", MovedToAnalysisCommon.class)
.put("worddelimiter", MovedToAnalysisCommon.class)
.put("worddelimitergraph", MovedToAnalysisCommon.class)
.put("flattengraph", MovedToAnalysisCommon.class)
.put("serbiannormalization", MovedToAnalysisCommon.class)
.put("shingle", ShingleTokenFilterFactory.class)
.put("minhash", MovedToAnalysisCommon.class)
.put("snowballporter", MovedToAnalysisCommon.class)
.put("soraninormalization", MovedToAnalysisCommon.class)
.put("soranistem", MovedToAnalysisCommon.class)
.put("spanishlightstem", MovedToAnalysisCommon.class)
.put("stemmeroverride", MovedToAnalysisCommon.class)
.put("stop", StopTokenFilterFactory.class)
.put("swedishlightstem", MovedToAnalysisCommon.class)
.put("synonym", MovedToAnalysisCommon.class)
.put("synonymgraph", MovedToAnalysisCommon.class)
.put("trim", MovedToAnalysisCommon.class)
.put("truncate", MovedToAnalysisCommon.class)
.put("turkishlowercase", MovedToAnalysisCommon.class)
.put("type", MovedToAnalysisCommon.class)
.put("uppercase", MovedToAnalysisCommon.class)
.put("worddelimiter", MovedToAnalysisCommon.class)
.put("worddelimitergraph", MovedToAnalysisCommon.class)
.put("flattengraph", MovedToAnalysisCommon.class)
// TODO: these tokenfilters are not yet exposed: useful?
// suggest stop
.put("suggeststop", Void.class)
.put("suggeststop", Void.class)
// capitalizes tokens
.put("capitalization", Void.class)
.put("capitalization", Void.class)
// like length filter (but codepoints)
.put("codepointcount", Void.class)
.put("codepointcount", Void.class)
// puts hyphenated words back together
.put("hyphenatedwords", Void.class)
.put("hyphenatedwords", Void.class)
// repeats anything marked as keyword
.put("keywordrepeat", Void.class)
.put("keywordrepeat", Void.class)
// like limittokencount, but by offset
.put("limittokenoffset", Void.class)
.put("limittokenoffset", Void.class)
// like limittokencount, but by position
.put("limittokenposition", Void.class)
.put("limittokenposition", Void.class)
// ???
.put("numericpayload", Void.class)
.put("numericpayload", Void.class)
// removes duplicates at the same position (this should be used by the existing factory)
.put("removeduplicates", Void.class)
.put("removeduplicates", Void.class)
// ???
.put("tokenoffsetpayload", Void.class)
.put("tokenoffsetpayload", Void.class)
// puts the type into the payload
.put("typeaspayload", Void.class)
.put("typeaspayload", Void.class)
// puts the type as a synonym
.put("typeassynonym", Void.class)
.put("typeassynonym", Void.class)
// fingerprint
.put("fingerprint", Void.class)
.put("fingerprint", Void.class)
// for tee-sinks
.put("daterecognizer", Void.class)
.put("daterecognizer", Void.class)
// for token filters that generate bad offsets, which are now rejected since Lucene 7
.put("fixbrokenoffsets", Void.class)
.put("fixbrokenoffsets", Void.class)
// should we expose it, or maybe think about higher level integration of the
// fake term frequency feature (LUCENE-7854)
.put("delimitedtermfrequency", Void.class)
.put("delimitedtermfrequency", Void.class)
// LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip
// particular token filters based on the attributes of the current token.
.put("protectedterm", Void.class)
@ -216,16 +216,16 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase {
.put("telugunormalization", Void.class)
.immutableMap();
static final Map<String,Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String,Class<?>>()
static final Map<String, Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String, Class<?>>()
// exposed in ES
.put("htmlstrip", MovedToAnalysisCommon.class)
.put("mapping", MovedToAnalysisCommon.class)
.put("htmlstrip", MovedToAnalysisCommon.class)
.put("mapping", MovedToAnalysisCommon.class)
.put("patternreplace", MovedToAnalysisCommon.class)
// TODO: these charfilters are not yet exposed: useful?
// handling of zwnj for persian
.put("persian", Void.class)
.put("cjkwidth", Void.class)
.put("persian", Void.class)
.put("cjkwidth", Void.class)
.immutableMap();
/**
@ -290,24 +290,36 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase {
public void testTokenizers() {
Set<String> missing = new TreeSet<String>();
missing.addAll(org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers()
.stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()));
missing.addAll(
org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers()
.stream()
.map(key -> key.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet())
);
missing.removeAll(getTokenizers().keySet());
assertTrue("new tokenizers found, please update KNOWN_TOKENIZERS: " + missing.toString(), missing.isEmpty());
}
public void testCharFilters() {
Set<String> missing = new TreeSet<String>();
missing.addAll(org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters()
.stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()));
missing.addAll(
org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters()
.stream()
.map(key -> key.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet())
);
missing.removeAll(getCharFilters().keySet());
assertTrue("new charfilters found, please update KNOWN_CHARFILTERS: " + missing.toString(), missing.isEmpty());
}
public void testTokenFilters() {
Set<String> missing = new TreeSet<String>();
missing.addAll(org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters()
.stream().map(key -> key.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()));
missing.addAll(
org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters()
.stream()
.map(key -> key.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet())
);
missing.removeAll(getTokenFilters().keySet());
assertTrue("new tokenfilters found, please update KNOWN_TOKENFILTERS: " + missing.toString(), missing.isEmpty());
}
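
All three tests above apply the same set-difference check: collect every factory name Lucene advertises, lowercase it, subtract what the KNOWN_* map already tracks, and fail on any leftovers. A minimal sketch of that pattern (availableNames and knownNames are placeholders, not identifiers from this file):

Set<String> missing = new TreeSet<>(availableNames); // everything Lucene exposes, lowercased
missing.removeAll(knownNames.keySet()); // drop the factories already accounted for
assertTrue("unaccounted factories: " + missing, missing.isEmpty());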

View File

@ -73,29 +73,68 @@ public class AsyncRecoveryTarget implements RecoveryTargetHandler {
}
@Override
public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps,
long maxSeenAutoIdTimestampOnPrimary, long maxSeqNoOfDeletesOrUpdatesOnPrimary,
RetentionLeases retentionLeases, long mappingVersionOnPrimary, ActionListener<Long> listener) {
executor.execute(() -> target.indexTranslogOperations(operations, totalTranslogOps, maxSeenAutoIdTimestampOnPrimary,
maxSeqNoOfDeletesOrUpdatesOnPrimary, retentionLeases, mappingVersionOnPrimary, listener));
public void indexTranslogOperations(
List<Translog.Operation> operations,
int totalTranslogOps,
long maxSeenAutoIdTimestampOnPrimary,
long maxSeqNoOfDeletesOrUpdatesOnPrimary,
RetentionLeases retentionLeases,
long mappingVersionOnPrimary,
ActionListener<Long> listener
) {
executor.execute(
() -> target.indexTranslogOperations(
operations,
totalTranslogOps,
maxSeenAutoIdTimestampOnPrimary,
maxSeqNoOfDeletesOrUpdatesOnPrimary,
retentionLeases,
mappingVersionOnPrimary,
listener
)
);
}
@Override
public void receiveFileInfo(List<String> phase1FileNames, List<Long> phase1FileSizes, List<String> phase1ExistingFileNames,
List<Long> phase1ExistingFileSizes, int totalTranslogOps, ActionListener<Void> listener) {
executor.execute(() -> target.receiveFileInfo(
phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, totalTranslogOps, listener));
public void receiveFileInfo(
List<String> phase1FileNames,
List<Long> phase1FileSizes,
List<String> phase1ExistingFileNames,
List<Long> phase1ExistingFileSizes,
int totalTranslogOps,
ActionListener<Void> listener
) {
executor.execute(
() -> target.receiveFileInfo(
phase1FileNames,
phase1FileSizes,
phase1ExistingFileNames,
phase1ExistingFileSizes,
totalTranslogOps,
listener
)
);
}
@Override
public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetadata,
ActionListener<Void> listener) {
public void cleanFiles(
int totalTranslogOps,
long globalCheckpoint,
Store.MetadataSnapshot sourceMetadata,
ActionListener<Void> listener
) {
executor.execute(() -> target.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetadata, listener));
}
@Override
public void writeFileChunk(StoreFileMetadata fileMetadata, long position, BytesReference content,
boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
public void writeFileChunk(
StoreFileMetadata fileMetadata,
long position,
BytesReference content,
boolean lastChunk,
int totalTranslogOps,
ActionListener<Void> listener
) {
final BytesReference copy = new BytesArray(BytesRef.deepCopyOf(content.toBytesRef()));
executor.execute(() -> target.writeFileChunk(fileMetadata, position, copy, lastChunk, totalTranslogOps, listener));
}

View File

@ -44,8 +44,8 @@ public class IngestDocumentMatcher {
* @param docB second document to compare
*/
public static void assertIngestDocument(IngestDocument docA, IngestDocument docB) {
if ((deepEquals(docA.getIngestMetadata(), docB.getIngestMetadata(), true) &&
deepEquals(docA.getSourceAndMetadata(), docB.getSourceAndMetadata(), false)) == false) {
if ((deepEquals(docA.getIngestMetadata(), docB.getIngestMetadata(), true)
&& deepEquals(docA.getSourceAndMetadata(), docB.getSourceAndMetadata(), false)) == false) {
throw new AssertionError("Expected [" + docA + "] but received [" + docB + "].");
}
}
@ -63,8 +63,7 @@ public class IngestDocumentMatcher {
for (Map.Entry<?, ?> entry : mapA.entrySet()) {
Object key = entry.getKey();
// Don't compare the timestamp of ingest metadata since it will differ between executions
if ((isIngestMeta && "timestamp".equals(key)) == false
&& deepEquals(entry.getValue(), mapB.get(key), false) == false) {
if ((isIngestMeta && "timestamp".equals(key)) == false && deepEquals(entry.getValue(), mapB.get(key), false) == false) {
return false;
}
}
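
A hedged usage note for the matcher above: because deepEquals skips the ingest-metadata timestamp key, two otherwise identical documents produced by separate pipeline runs still compare equal. Sketch (docA and docB are assumed to be two runs over the same source):

// Passes even though each run stamps its own ingest timestamp.
IngestDocumentMatcher.assertIngestDocument(docA, docB);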

View File

@ -44,8 +44,9 @@ import org.opensearch.plugins.Plugin;
public class IngestTestPlugin extends Plugin implements IngestPlugin {
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
return Collections.singletonMap("test", (factories, tag, description, config) ->
new TestProcessor("id", "test", "description", doc -> {
return Collections.singletonMap(
"test",
(factories, tag, description, config) -> new TestProcessor("id", "test", "description", doc -> {
doc.setFieldValue("processed", true);
if (doc.hasField("fail") && doc.getFieldValue("fail", Boolean.class)) {
throw new IllegalArgumentException("test processor failed");
@ -54,6 +55,7 @@ public class IngestTestPlugin extends Plugin implements IngestPlugin {
return null;
}
return doc;
}));
})
);
}
}
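
To read the processor above at a glance (expectations sketched as comments; the condition guarding the elided return null branch is cut off by the hunk and assumed to be a similar flag check):

// Every document gets doc.setFieldValue("processed", true).
// A boolean "fail" field set to true raises IllegalArgumentException instead.
// The elided branch returns null, which signals the pipeline to drop the document.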

View File

@ -57,7 +57,7 @@ public final class RandomDocumentPicks {
public static String randomFieldName(Random random) {
int numLevels = RandomNumbers.randomIntBetween(random, 1, 5);
StringBuilder fieldName = new StringBuilder();
for (int i = 0; i < numLevels-1; i++) {
for (int i = 0; i < numLevels - 1; i++) {
if (i > 0) {
fieldName.append('.');
}
@ -153,8 +153,10 @@ public final class RandomDocumentPicks {
String id = randomString(random);
String routing = null;
Long version = randomNonNegtiveLong(random);
VersionType versionType = RandomPicks.randomFrom(random,
new VersionType[]{VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE});
VersionType versionType = RandomPicks.randomFrom(
random,
new VersionType[] { VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE }
);
if (random.nextBoolean()) {
routing = randomString(random);
}
@ -175,7 +177,7 @@ public final class RandomDocumentPicks {
}
private static Object randomFieldValue(Random random, int currentDepth) {
switch(RandomNumbers.randomIntBetween(random, 0, 9)) {
switch (RandomNumbers.randomIntBetween(random, 0, 9)) {
case 0:
return randomString(random);
case 1:

View File

@ -102,8 +102,12 @@ public class TestProcessor implements Processor {
public static final class Factory implements Processor.Factory {
@Override
public TestProcessor create(Map<String, Processor.Factory> registry, String processorTag,
String description, Map<String, Object> config) throws Exception {
public TestProcessor create(
Map<String, Processor.Factory> registry,
String processorTag,
String description,
Map<String, Object> config
) throws Exception {
return new TestProcessor(processorTag, "test-processor", description, ingestDocument -> {});
}
}
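
A hedged sketch of the factory contract above (the maps are empty placeholders, and since create declares throws Exception the caller is assumed to be a test method that propagates it):

// Registry, tag, description and config are accepted, but the result is always
// a no-op processor of type "test-processor".
TestProcessor processor = new TestProcessor.Factory().create(Collections.emptyMap(), "tag", "description", Collections.emptyMap());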

View File

@ -69,7 +69,6 @@ public class TestTemplateService extends ScriptService {
}
}
public static class MockTemplateScript extends TemplateScript {
private final String expected;

View File

@ -34,7 +34,7 @@ import java.util.function.Function;
public class PriviledgedMockMaker implements MockMaker {
private static AccessControlContext context;
private final ByteBuddyMockMaker delegate;
/**
* Create dedicated AccessControlContext to use the Mockito protection domain (test only)
* so as to relax the security constraints for the test cases which rely on mocks. This plugin
@ -42,23 +42,17 @@ public class PriviledgedMockMaker implements MockMaker {
* since Mockito does not support SecurityManager out of the box. The method has to be called by
* the test framework before the SecurityManager is set, otherwise additional permissions have
* to be granted to the caller:
*
*
* permission java.security.Permission "createAccessControlContext"
*
*
*/
public static void createAccessControlContext() {
// This combiner, if bound to an access control context, will unconditionally
// substitute the call chain protection domains with the 'mockito-core' one if it
// is present. The security checks are relaxed intentionally to trust mocking
// implementation if it is part of the call chain.
final DomainCombiner combiner = (current, assigned) -> Arrays
.stream(current)
.filter(pd ->
pd
.getCodeSource()
.getLocation()
.getFile()
.contains("mockito-core") /* check mockito-core only */)
final DomainCombiner combiner = (current, assigned) -> Arrays.stream(current)
.filter(pd -> pd.getCodeSource().getLocation().getFile().contains("mockito-core") /* check mockito-core only */)
.findAny()
.map(pd -> new ProtectionDomain[] { pd })
.orElse(current);
@ -67,39 +61,31 @@ public class PriviledgedMockMaker implements MockMaker {
final AccessControlContext wrapper = new AccessControlContext(AccessController.getContext(), combiner);
// Create new access control context with dedicated combiner
context = AccessController.doPrivileged(
(PrivilegedAction<AccessControlContext>) AccessController::getContext,
wrapper);
context = AccessController.doPrivileged((PrivilegedAction<AccessControlContext>) AccessController::getContext, wrapper);
}
/**
* Construct an instance of the priviledged mock maker using ByteBuddyMockMaker under the hood.
*/
public PriviledgedMockMaker() {
delegate = AccessController.doPrivileged(
(PrivilegedAction<ByteBuddyMockMaker>) () -> new ByteBuddyMockMaker(),
context);
delegate = AccessController.doPrivileged((PrivilegedAction<ByteBuddyMockMaker>) () -> new ByteBuddyMockMaker(), context);
}
@SuppressWarnings("rawtypes")
@Override
public <T> T createMock(MockCreationSettings<T> settings, MockHandler handler) {
return AccessController.doPrivileged(
(PrivilegedAction<T>) () -> delegate.createMock(settings, handler),
context);
return AccessController.doPrivileged((PrivilegedAction<T>) () -> delegate.createMock(settings, handler), context);
}
@SuppressWarnings("rawtypes")
@Override
public <T> Optional<T> createSpy(MockCreationSettings<T> settings, MockHandler handler, T object) {
// The ByteBuddyMockMaker does not implement createSpy and relies on Mockito's fallback
return AccessController.doPrivileged(
(PrivilegedAction<Optional<T> >) () -> {
T instance = delegate.createMock(settings, handler);
new LenientCopyTool().copyToMock(object, instance);
return Optional.of(instance);
},
context);
return AccessController.doPrivileged((PrivilegedAction<Optional<T>>) () -> {
T instance = delegate.createMock(settings, handler);
new LenientCopyTool().copyToMock(object, instance);
return Optional.of(instance);
}, context);
}
@SuppressWarnings("rawtypes")
@ -111,11 +97,10 @@ public class PriviledgedMockMaker implements MockMaker {
@SuppressWarnings("rawtypes")
@Override
public void resetMock(Object mock, MockHandler newHandler, MockCreationSettings settings) {
AccessController.doPrivileged(
(PrivilegedAction<Void>) () -> {
delegate.resetMock(mock, newHandler, settings);
return null;
}, context);
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
delegate.resetMock(mock, newHandler, settings);
return null;
}, context);
}
@Override
@ -131,10 +116,12 @@ public class PriviledgedMockMaker implements MockMaker {
}
@Override
public <T> ConstructionMockControl<T> createConstructionMock(Class<T> type,
Function<MockedConstruction.Context, MockCreationSettings<T>> settingsFactory,
Function<MockedConstruction.Context, MockHandler<T>> handlerFactory,
MockedConstruction.MockInitializer<T> mockInitializer) {
public <T> ConstructionMockControl<T> createConstructionMock(
Class<T> type,
Function<MockedConstruction.Context, MockCreationSettings<T>> settingsFactory,
Function<MockedConstruction.Context, MockHandler<T>> handlerFactory,
MockedConstruction.MockInitializer<T> mockInitializer
) {
return delegate.createConstructionMock(type, settingsFactory, handlerFactory, mockInitializer);
}
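
A sequencing sketch implied by the Javadoc earlier in this class (the bootstrap code is assumed, not shown in this diff):

// 1. Capture the relaxed AccessControlContext while no SecurityManager is installed yet.
PriviledgedMockMaker.createAccessControlContext();
// 2. Only afterwards may the test framework install its SecurityManager; mock creation
// then runs inside AccessController.doPrivileged(..., context) and bypasses the stricter policy.
// Calling step 1 too late requires granting: permission java.security.Permission "createAccessControlContext"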

View File

@ -88,27 +88,31 @@ public class MockNode extends Node {
}
public MockNode(
final Settings settings,
final Collection<Class<? extends Plugin>> classpathPlugins,
final boolean forbidPrivateIndexSettings) {
final Settings settings,
final Collection<Class<? extends Plugin>> classpathPlugins,
final boolean forbidPrivateIndexSettings
) {
this(settings, classpathPlugins, null, forbidPrivateIndexSettings);
}
public MockNode(
final Settings settings,
final Collection<Class<? extends Plugin>> classpathPlugins,
final Path configPath,
final boolean forbidPrivateIndexSettings) {
final Settings settings,
final Collection<Class<? extends Plugin>> classpathPlugins,
final Path configPath,
final boolean forbidPrivateIndexSettings
) {
this(
InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath, () -> "mock_ node"),
classpathPlugins,
forbidPrivateIndexSettings);
InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath, () -> "mock_ node"),
classpathPlugins,
forbidPrivateIndexSettings
);
}
private MockNode(
final Environment environment,
final Collection<Class<? extends Plugin>> classpathPlugins,
final boolean forbidPrivateIndexSettings) {
final Environment environment,
final Collection<Class<? extends Plugin>> classpathPlugins,
final boolean forbidPrivateIndexSettings
) {
super(environment, classpathPlugins, forbidPrivateIndexSettings);
this.classpathPlugins = classpathPlugins;
}
@ -136,18 +140,38 @@ public class MockNode extends Node {
return new MockPageCacheRecycler(settings);
}
@Override
protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
FetchPhase fetchPhase, ResponseCollectorService responseCollectorService,
CircuitBreakerService circuitBreakerService) {
protected SearchService newSearchService(
ClusterService clusterService,
IndicesService indicesService,
ThreadPool threadPool,
ScriptService scriptService,
BigArrays bigArrays,
FetchPhase fetchPhase,
ResponseCollectorService responseCollectorService,
CircuitBreakerService circuitBreakerService
) {
if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) {
return super.newSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase,
responseCollectorService, circuitBreakerService);
return super.newSearchService(
clusterService,
indicesService,
threadPool,
scriptService,
bigArrays,
fetchPhase,
responseCollectorService,
circuitBreakerService
);
}
return new MockSearchService(clusterService, indicesService, threadPool, scriptService,
bigArrays, fetchPhase, circuitBreakerService);
return new MockSearchService(
clusterService,
indicesService,
threadPool,
scriptService,
bigArrays,
fetchPhase,
circuitBreakerService
);
}
@Override
@ -159,10 +183,15 @@ public class MockNode extends Node {
}
@Override
protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool,
TransportInterceptor interceptor,
Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
ClusterSettings clusterSettings, Set<String> taskHeaders) {
protected TransportService newTransportService(
Settings settings,
Transport transport,
ThreadPool threadPool,
TransportInterceptor interceptor,
Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
ClusterSettings clusterSettings,
Set<String> taskHeaders
) {
// we use the MockTransportService.TestPlugin class as a marker to create a network
// module with this MockNetworkService. NetworkService is such an integral part of the system
// we don't allow plugging it in from plugins or anything. this is a test-only override and
@ -182,8 +211,12 @@ public class MockNode extends Node {
}
@Override
protected ClusterInfoService newClusterInfoService(Settings settings, ClusterService clusterService,
ThreadPool threadPool, NodeClient client) {
protected ClusterInfoService newClusterInfoService(
Settings settings,
ClusterService clusterService,
ThreadPool threadPool,
NodeClient client
) {
if (getPluginsService().filterPlugins(MockInternalClusterInfoService.TestPlugin.class).isEmpty()) {
return super.newClusterInfoService(settings, clusterService, threadPool, client);
} else {
@ -204,6 +237,6 @@ public class MockNode extends Node {
@Override
protected void configureNodeAndClusterIdStateListener(ClusterService clusterService) {
//do not configure this in tests as this is causing SetOnce to throw exceptions when jvm is used for multiple tests
// do not configure this in tests as this is causing SetOnce to throw exceptions when jvm is used for multiple tests
}
}
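
The overrides above share one convention: a test opts into a mock subsystem by listing a marker plugin class, and MockNode substitutes the mock implementation only when that marker is present. A hedged construction sketch (the settings value is assumed):

// Including MockSearchService.TestPlugin makes newSearchService return MockSearchService;
// without it, the production SearchService is used.
MockNode node = new MockNode(settings, Arrays.asList(MockSearchService.TestPlugin.class), true);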

View File

@ -49,8 +49,12 @@ public class RecoverySettingsChunkSizePlugin extends Plugin {
/**
* The chunk size. Only exposed by tests.
*/
public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.chunk_size",
RecoverySettings.DEFAULT_CHUNK_SIZE, Property.Dynamic, Property.NodeScope);
public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting(
"indices.recovery.chunk_size",
RecoverySettings.DEFAULT_CHUNK_SIZE,
Property.Dynamic,
Property.NodeScope
);
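
Because the setting above is declared Dynamic, tests can resize recovery chunks on a live cluster, as in this hedged sketch (client() is assumed to come from the integration-test base class):

// Shrink chunks to force many small file-chunk requests during recovery.
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), "8kb"))
.get();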
@Override
public List<Setting<?>> getSettings() {

View File

@ -49,7 +49,7 @@ public class PluginTestUtil {
private static void writeProperties(Path propertiesFile, String... stringProps) throws IOException {
assert stringProps.length % 2 == 0;
Files.createDirectories(propertiesFile.getParent());
Properties properties = new Properties();
Properties properties = new Properties();
for (int i = 0; i < stringProps.length; i += 2) {
properties.put(stringProps[i], stringProps[i + 1]);
}

View File

@ -68,10 +68,7 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends OpenSearchSin
@Override
protected Settings nodeSettings() {
return Settings.builder()
.put(super.nodeSettings())
.setSecureSettings(credentials())
.build();
return Settings.builder().put(super.nodeSettings()).setSecureSettings(credentials()).build();
}
protected abstract SecureSettings credentials();
@ -129,24 +126,17 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends OpenSearchSin
.setIndices("test-idx-*", "-test-idx-3")
.get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
assertThat(
createSnapshotResponse.getSnapshotInfo().successfulShards(),
equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())
);
assertThat(client().admin()
.cluster()
.prepareGetSnapshots("test-repo")
.setSnapshots(snapshotName)
.get()
.getSnapshots()
.get(0)
.state(),
equalTo(SnapshotState.SUCCESS));
assertThat(
client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots(snapshotName).get().getSnapshots().get(0).state(),
equalTo(SnapshotState.SUCCESS)
);
assertTrue(client().admin()
.cluster()
.prepareDeleteSnapshot("test-repo", snapshotName)
.get()
.isAcknowledged());
assertTrue(client().admin().cluster().prepareDeleteSnapshot("test-repo", snapshotName).get().isAcknowledged());
}
public void testListChildren() throws Exception {
@ -167,8 +157,11 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends OpenSearchSin
assertChildren(repo.basePath(), Collections.singleton("foo"));
assertBlobsByPrefix(repo.basePath(), "fo", Collections.emptyMap());
assertChildren(repo.basePath().add("foo"), Arrays.asList("nested", "nested2"));
assertBlobsByPrefix(repo.basePath().add("foo"), "nest",
Collections.singletonMap("nested-blob", new PlainBlobMetadata("nested-blob", testBlobLen)));
assertBlobsByPrefix(
repo.basePath().add("foo"),
"nest",
Collections.singletonMap("nested-blob", new PlainBlobMetadata("nested-blob", testBlobLen))
);
assertChildren(repo.basePath().add("foo").add("nested"), Collections.emptyList());
if (randomBoolean()) {
deleteAndAssertEmpty(repo.basePath());
@ -205,20 +198,17 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends OpenSearchSin
.setIndices("test-idx-*", "-test-idx-3")
.get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
assertThat(
createSnapshotResponse.getSnapshotInfo().successfulShards(),
equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())
);
assertThat(client().admin()
.cluster()
.prepareGetSnapshots("test-repo")
.setSnapshots(snapshotName)
.get()
.getSnapshots().get(0)
.state(),
equalTo(SnapshotState.SUCCESS));
assertThat(
client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots(snapshotName).get().getSnapshots().get(0).state(),
equalTo(SnapshotState.SUCCESS)
);
final BlobStoreRepository repo =
(BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo");
final BlobStoreRepository repo = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo");
final Executor genericExec = repo.threadPool().executor(ThreadPool.Names.GENERIC);
logger.info("--> creating a dangling index folder");
@ -290,8 +280,9 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends OpenSearchSin
private Set<String> listChildren(BlobPath path) {
final PlainActionFuture<Set<String>> future = PlainActionFuture.newFuture();
final BlobStoreRepository repository = getRepository();
repository.threadPool().generic().execute(
ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).children().keySet()));
repository.threadPool()
.generic()
.execute(ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).children().keySet()));
return future.actionGet();
}
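
listChildren above shows the recurring idiom for bridging the repository's async APIs in tests: run the blob-store work on the repository's generic thread pool, feed the result into a PlainActionFuture, and block the test thread with actionGet. Distilled (executor and the supplier body are placeholders):

PlainActionFuture<Set<String>> future = PlainActionFuture.newFuture();
executor.execute(ActionRunnable.supply(future, () -> resultComputedOffTheTestThread()));
Set<String> result = future.actionGet(); // blocks until the supplier completes or fails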

View File

@ -97,28 +97,32 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
protected abstract Class<? extends Exception> unresponsiveExceptionType();
protected abstract BlobContainer createBlobContainer(@Nullable Integer maxRetries,
protected abstract BlobContainer createBlobContainer(
@Nullable Integer maxRetries,
@Nullable TimeValue readTimeout,
@Nullable Boolean disableChunkedEncoding,
@Nullable ByteSizeValue bufferSize);
@Nullable ByteSizeValue bufferSize
);
public void testReadNonexistentBlobThrowsNoSuchFileException() {
final BlobContainer blobContainer = createBlobContainer(between(1, 5), null, null, null);
final long position = randomLongBetween(0, MAX_RANGE_VAL);
final int length = randomIntBetween(1, Math.toIntExact(Math.min(Integer.MAX_VALUE, MAX_RANGE_VAL - position)));
final Exception exception = expectThrows(
NoSuchFileException.class,
() -> {
if (randomBoolean()) {
Streams.readFully(blobContainer.readBlob("read_nonexistent_blob"));
} else {
Streams.readFully(blobContainer.readBlob("read_nonexistent_blob", 0, 1));
}
});
final Exception exception = expectThrows(NoSuchFileException.class, () -> {
if (randomBoolean()) {
Streams.readFully(blobContainer.readBlob("read_nonexistent_blob"));
} else {
Streams.readFully(blobContainer.readBlob("read_nonexistent_blob", 0, 1));
}
});
assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found"));
assertThat(expectThrows(NoSuchFileException.class,
() -> Streams.readFully(blobContainer.readBlob("read_nonexistent_blob", position, length)))
.getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found"));
assertThat(
expectThrows(
NoSuchFileException.class,
() -> Streams.readFully(blobContainer.readBlob("read_nonexistent_blob", position, length))
).getMessage().toLowerCase(Locale.ROOT),
containsString("blob object [read_nonexistent_blob] not found")
);
}
public void testReadBlobWithRetries() throws Exception {
@ -138,8 +142,15 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
return;
}
if (randomBoolean()) {
exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
exchange.sendResponseHeaders(
randomFrom(
HttpStatus.SC_INTERNAL_SERVER_ERROR,
HttpStatus.SC_BAD_GATEWAY,
HttpStatus.SC_SERVICE_UNAVAILABLE,
HttpStatus.SC_GATEWAY_TIMEOUT
),
-1
);
} else if (randomBoolean()) {
sendIncompleteContent(exchange, bytes);
}
@ -162,8 +173,7 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
wrappedStream = inputStream;
}
final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
logger.info("maxRetries={}, readLimit={}, byteSize={}, bytesRead={}",
maxRetries, readLimit, bytes.length, bytesRead.length);
logger.info("maxRetries={}, readLimit={}, byteSize={}, bytesRead={}", maxRetries, readLimit, bytes.length, bytesRead.length);
assertArrayEquals(Arrays.copyOfRange(bytes, 0, readLimit), bytesRead);
if (readLimit < bytes.length) {
// we might have completed things based on an incomplete response, and we're happy with that
@ -196,8 +206,15 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
return;
}
if (randomBoolean()) {
exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
exchange.sendResponseHeaders(
randomFrom(
HttpStatus.SC_INTERNAL_SERVER_ERROR,
HttpStatus.SC_BAD_GATEWAY,
HttpStatus.SC_SERVICE_UNAVAILABLE,
HttpStatus.SC_GATEWAY_TIMEOUT
),
-1
);
} else if (randomBoolean()) {
sendIncompleteContent(exchange, bytes);
}
@ -222,8 +239,15 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
wrappedStream = inputStream;
}
final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
logger.info("maxRetries={}, position={}, length={}, readLimit={}, byteSize={}, bytesRead={}",
maxRetries, position, length, readLimit, bytes.length, bytesRead.length);
logger.info(
"maxRetries={}, position={}, length={}, readLimit={}, byteSize={}, bytesRead={}",
maxRetries,
position,
length,
readLimit,
bytes.length,
bytesRead.length
);
assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + readLimit)), bytesRead);
if (readLimit == 0 || (readLimit < length && readLimit == bytesRead.length)) {
// we might have completed things based on an incomplete response, and we're happy with that
@ -241,8 +265,10 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
// HTTP server does not send a response
httpServer.createContext(downloadStorageEndpoint("read_blob_unresponsive"), exchange -> {});
Exception exception = expectThrows(unresponsiveExceptionType(),
() -> Streams.readFully(blobContainer.readBlob("read_blob_unresponsive")));
Exception exception = expectThrows(
unresponsiveExceptionType(),
() -> Streams.readFully(blobContainer.readBlob("read_blob_unresponsive"))
);
assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class));
@ -253,17 +279,25 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
final int position = randomIntBetween(0, bytes.length - 1);
final int length = randomIntBetween(1, randomBoolean() ? bytes.length : Integer.MAX_VALUE);
exception = expectThrows(Exception.class, () -> {
try (InputStream stream = randomBoolean() ?
blobContainer.readBlob("read_blob_incomplete") :
blobContainer.readBlob("read_blob_incomplete", position, length)) {
try (
InputStream stream = randomBoolean()
? blobContainer.readBlob("read_blob_incomplete")
: blobContainer.readBlob("read_blob_incomplete", position, length)
) {
Streams.readFully(stream);
}
});
assertThat(exception, either(instanceOf(SocketTimeoutException.class)).or(instanceOf(ConnectionClosedException.class))
.or(instanceOf(RuntimeException.class)));
assertThat(exception.getMessage().toLowerCase(Locale.ROOT), either(containsString("read timed out")).or(
containsString("premature end of chunk coded message body: closing chunk expected")).or(containsString("Read timed out"))
.or(containsString("unexpected end of file from server")));
assertThat(
exception,
either(instanceOf(SocketTimeoutException.class)).or(instanceOf(ConnectionClosedException.class))
.or(instanceOf(RuntimeException.class))
);
assertThat(
exception.getMessage().toLowerCase(Locale.ROOT),
either(containsString("read timed out")).or(containsString("premature end of chunk coded message body: closing chunk expected"))
.or(containsString("Read timed out"))
.or(containsString("unexpected end of file from server"))
);
assertThat(exception.getSuppressed().length, equalTo(maxRetries));
}
@ -274,16 +308,17 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
// HTTP server closes connection immediately
httpServer.createContext(downloadStorageEndpoint("read_blob_no_response"), HttpExchange::close);
Exception exception = expectThrows(unresponsiveExceptionType(),
() -> {
if (randomBoolean()) {
Streams.readFully(blobContainer.readBlob("read_blob_no_response"));
} else {
Streams.readFully(blobContainer.readBlob("read_blob_no_response", 0, 1));
}
});
assertThat(exception.getMessage().toLowerCase(Locale.ROOT), either(containsString("the target server failed to respond"))
.or(containsString("unexpected end of file from server")));
Exception exception = expectThrows(unresponsiveExceptionType(), () -> {
if (randomBoolean()) {
Streams.readFully(blobContainer.readBlob("read_blob_no_response"));
} else {
Streams.readFully(blobContainer.readBlob("read_blob_no_response", 0, 1));
}
});
assertThat(
exception.getMessage().toLowerCase(Locale.ROOT),
either(containsString("the target server failed to respond")).or(containsString("unexpected end of file from server"))
);
}
public void testReadBlobWithPrematureConnectionClose() {
@ -298,16 +333,20 @@ public abstract class AbstractBlobContainerRetriesTestCase extends OpenSearchTes
});
final Exception exception = expectThrows(Exception.class, () -> {
try (InputStream stream = randomBoolean() ?
blobContainer.readBlob("read_blob_incomplete", 0, 1):
blobContainer.readBlob("read_blob_incomplete")) {
try (
InputStream stream = randomBoolean()
? blobContainer.readBlob("read_blob_incomplete", 0, 1)
: blobContainer.readBlob("read_blob_incomplete")
) {
Streams.readFully(stream);
}
});
assertThat(exception.getMessage().toLowerCase(Locale.ROOT),
either(containsString("premature end of chunk coded message body: closing chunk expected"))
.or(containsString("premature end of content-length delimited message body"))
.or(containsString("connection closed prematurely")));
assertThat(
exception.getMessage().toLowerCase(Locale.ROOT),
either(containsString("premature end of chunk coded message body: closing chunk expected")).or(
containsString("premature end of content-length delimited message body")
).or(containsString("connection closed prematurely"))
);
assertThat(exception.getSuppressed().length, equalTo(Math.min(10, maxRetries)));
}
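
A detail worth spelling out for the retry tests above: each failed HTTP attempt before the last is attached to the final exception as a suppressed exception, so the suppressed count doubles as an attempt counter. Worked example (the maxRetries value is illustrative):

// maxRetries = 3: one initial attempt plus three retries all fail;
// the thrown exception then carries the three earlier failures as suppressed.
assertThat(exception.getSuppressed().length, equalTo(3));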

View File

@ -105,8 +105,8 @@ import static org.mockito.Mockito.when;
public final class BlobStoreTestUtil {
public static void assertRepoConsistency(InternalTestCluster testCluster, String repoName) {
final BlobStoreRepository repo =
(BlobStoreRepository) testCluster.getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
final BlobStoreRepository repo = (BlobStoreRepository) testCluster.getCurrentMasterNodeInstance(RepositoriesService.class)
.repository(repoName);
BlobStoreTestUtil.assertConsistency(repo, repo.threadPool().executor(ThreadPool.Names.GENERIC));
}
@ -131,9 +131,11 @@ public final class BlobStoreTestUtil {
}
assertIndexGenerations(blobContainer, latestGen);
final RepositoryData repositoryData;
try (InputStream blob = blobContainer.readBlob(BlobStoreRepository.INDEX_FILE_PREFIX + latestGen);
XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE, blob)) {
try (
InputStream blob = blobContainer.readBlob(BlobStoreRepository.INDEX_FILE_PREFIX + latestGen);
XContentParser parser = XContentType.JSON.xContent()
.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob)
) {
repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen, false);
}
assertIndexUUIDs(repository, repositoryData);
@ -151,9 +153,13 @@ public final class BlobStoreTestUtil {
}
private static void assertIndexGenerations(BlobContainer repoRoot, long latestGen) throws IOException {
final long[] indexGenerations = repoRoot.listBlobsByPrefix(BlobStoreRepository.INDEX_FILE_PREFIX).keySet().stream()
final long[] indexGenerations = repoRoot.listBlobsByPrefix(BlobStoreRepository.INDEX_FILE_PREFIX)
.keySet()
.stream()
.map(s -> s.replace(BlobStoreRepository.INDEX_FILE_PREFIX, ""))
.mapToLong(Long::parseLong).sorted().toArray();
.mapToLong(Long::parseLong)
.sorted()
.toArray();
assertEquals(latestGen, indexGenerations[indexGenerations.length - 1]);
assertTrue(indexGenerations.length <= 2);
}
@ -171,8 +177,10 @@ public final class BlobStoreTestUtil {
if (generation != null && generation.equals(ShardGenerations.NEW_SHARD_GEN) == false) {
final String shardId = Integer.toString(i);
assertThat(shardContainers, hasKey(shardId));
assertThat(shardContainers.get(shardId).listBlobsByPrefix(BlobStoreRepository.INDEX_FILE_PREFIX),
hasKey(BlobStoreRepository.INDEX_FILE_PREFIX + generation));
assertThat(
shardContainers.get(shardId).listBlobsByPrefix(BlobStoreRepository.INDEX_FILE_PREFIX),
hasKey(BlobStoreRepository.INDEX_FILE_PREFIX + generation)
);
}
}
}
@ -180,31 +188,39 @@ public final class BlobStoreTestUtil {
}
private static void assertIndexUUIDs(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException {
final List<String> expectedIndexUUIDs =
repositoryData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toList());
final List<String> expectedIndexUUIDs = repositoryData.getIndices()
.values()
.stream()
.map(IndexId::getId)
.collect(Collectors.toList());
final BlobContainer indicesContainer = repository.blobContainer().children().get("indices");
final List<String> foundIndexUUIDs;
if (indicesContainer == null) {
foundIndexUUIDs = Collections.emptyList();
} else {
// Skip Lucene MockFS extraN directory
foundIndexUUIDs = indicesContainer.children().keySet().stream().filter(
s -> s.startsWith("extra") == false).collect(Collectors.toList());
foundIndexUUIDs = indicesContainer.children()
.keySet()
.stream()
.filter(s -> s.startsWith("extra") == false)
.collect(Collectors.toList());
}
assertThat(foundIndexUUIDs, containsInAnyOrder(expectedIndexUUIDs.toArray(Strings.EMPTY_ARRAY)));
for (String indexId : foundIndexUUIDs) {
final Set<String> indexMetaGenerationsFound = indicesContainer.children().get(indexId)
.listBlobsByPrefix(BlobStoreRepository.METADATA_PREFIX).keySet().stream()
final Set<String> indexMetaGenerationsFound = indicesContainer.children()
.get(indexId)
.listBlobsByPrefix(BlobStoreRepository.METADATA_PREFIX)
.keySet()
.stream()
.map(p -> p.replace(BlobStoreRepository.METADATA_PREFIX, "").replace(".dat", ""))
.collect(Collectors.toSet());
final Set<String> indexMetaGenerationsExpected = new HashSet<>();
final IndexId idx =
repositoryData.getIndices().values().stream().filter(i -> i.getId().equals(indexId)).findFirst().get();
final IndexId idx = repositoryData.getIndices().values().stream().filter(i -> i.getId().equals(indexId)).findFirst().get();
for (SnapshotId snapshotId : repositoryData.getSnapshots(idx)) {
indexMetaGenerationsExpected.add(repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotId, idx));
}
// TODO: assertEquals(indexMetaGenerationsExpected, indexMetaGenerationsFound); requires cleanup functionality for
// index meta generations blobs
// index meta generations blobs
assertTrue(indexMetaGenerationsFound.containsAll(indexMetaGenerationsExpected));
}
}
@ -213,11 +229,14 @@ public final class BlobStoreTestUtil {
final BlobContainer repoRoot = repository.blobContainer();
final Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
final List<String> expectedSnapshotUUIDs = snapshotIds.stream().map(SnapshotId::getUUID).collect(Collectors.toList());
for (String prefix : new String[]{BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.METADATA_PREFIX}) {
final Collection<String> foundSnapshotUUIDs = repoRoot.listBlobs().keySet().stream().filter(p -> p.startsWith(prefix))
.map(p -> p.replace(prefix, "").replace(".dat", ""))
.collect(Collectors.toSet());
assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY)));
for (String prefix : new String[] { BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.METADATA_PREFIX }) {
final Collection<String> foundSnapshotUUIDs = repoRoot.listBlobs()
.keySet()
.stream()
.filter(p -> p.startsWith(prefix))
.map(p -> p.replace(prefix, "").replace(".dat", ""))
.collect(Collectors.toSet());
assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY)));
}
final BlobContainer indicesContainer = repository.getBlobContainer().children().get("indices");
@ -230,15 +249,22 @@ public final class BlobStoreTestUtil {
final Map<IndexId, Integer> maxShardCountsExpected = new HashMap<>();
final Map<IndexId, Integer> maxShardCountsSeen = new HashMap<>();
// Assert that for each snapshot, the relevant metadata was written to index and shard folders
for (SnapshotId snapshotId: snapshotIds) {
for (SnapshotId snapshotId : snapshotIds) {
final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
for (String index : snapshotInfo.indices()) {
final IndexId indexId = repositoryData.resolveIndexId(index);
assertThat(indices, hasKey(indexId.getId()));
final BlobContainer indexContainer = indices.get(indexId.getId());
assertThat(indexContainer.listBlobs(),
hasKey(String.format(Locale.ROOT, BlobStoreRepository.METADATA_NAME_FORMAT,
repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotId, indexId))));
assertThat(
indexContainer.listBlobs(),
hasKey(
String.format(
Locale.ROOT,
BlobStoreRepository.METADATA_NAME_FORMAT,
repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotId, indexId)
)
)
);
final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repositoryData, snapshotId, indexId);
for (Map.Entry<String, BlobContainer> entry : indexContainer.children().entrySet()) {
// Skip Lucene MockFS extraN directory
@ -248,38 +274,55 @@ public final class BlobStoreTestUtil {
final int shardId = Integer.parseInt(entry.getKey());
final int shardCount = indexMetadata.getNumberOfShards();
maxShardCountsExpected.compute(
indexId, (i, existing) -> existing == null || existing < shardCount ? shardCount : existing);
indexId,
(i, existing) -> existing == null || existing < shardCount ? shardCount : existing
);
final BlobContainer shardContainer = entry.getValue();
// TODO: we shouldn't be leaking empty shard directories when a shard (but not all of the index it belongs to)
// becomes unreferenced. We should fix that and remove this conditional once it's fixed.
// becomes unreferenced. We should fix that and remove this conditional once it's fixed.
if (shardContainer.listBlobs().keySet().stream().anyMatch(blob -> blob.startsWith("extra") == false)) {
final int impliedCount = shardId - 1;
maxShardCountsSeen.compute(
indexId, (i, existing) -> existing == null || existing < impliedCount ? impliedCount : existing);
indexId,
(i, existing) -> existing == null || existing < impliedCount ? impliedCount : existing
);
}
if (shardId < shardCount && snapshotInfo.shardFailures().stream().noneMatch(
shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) {
if (shardId < shardCount
&& snapshotInfo.shardFailures()
.stream()
.noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) {
final Map<String, BlobMetadata> shardPathContents = shardContainer.listBlobs();
assertThat(shardPathContents,
hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())));
assertThat(shardPathContents.keySet().stream()
.filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)).count(), lessThanOrEqualTo(2L));
assertThat(
shardPathContents,
hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID()))
);
assertThat(
shardPathContents.keySet()
.stream()
.filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))
.count(),
lessThanOrEqualTo(2L)
);
}
}
}
}
maxShardCountsSeen.forEach(((indexId, count) -> assertThat("Found unreferenced shard paths for index [" + indexId + "]",
count, lessThanOrEqualTo(maxShardCountsExpected.get(indexId)))));
maxShardCountsSeen.forEach(
((indexId, count) -> assertThat(
"Found unreferenced shard paths for index [" + indexId + "]",
count,
lessThanOrEqualTo(maxShardCountsExpected.get(indexId))
))
);
}
public static long createDanglingIndex(BlobStoreRepository repository, String name, Set<String> files)
throws InterruptedException, ExecutionException {
public static long createDanglingIndex(BlobStoreRepository repository, String name, Set<String> files) throws InterruptedException,
ExecutionException {
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
final AtomicLong totalSize = new AtomicLong();
repository.threadPool().generic().execute(ActionRunnable.run(future, () -> {
final BlobStore blobStore = repository.blobStore();
BlobContainer container =
blobStore.blobContainer(repository.basePath().add("indices").add(name));
BlobContainer container = blobStore.blobContainer(repository.basePath().add("indices").add(name));
for (String file : files) {
int size = randomIntBetween(0, 10);
totalSize.addAndGet(size);
@ -295,14 +338,13 @@ public final class BlobStoreTestUtil {
repository.threadPool().generic().execute(ActionRunnable.supply(future, () -> {
final BlobStore blobStore = repository.blobStore();
for (String index : indexToFiles.keySet()) {
if (blobStore.blobContainer(repository.basePath().add("indices"))
.children().containsKey(index) == false) {
if (blobStore.blobContainer(repository.basePath().add("indices")).children().containsKey(index) == false) {
return false;
}
for (String file : indexToFiles.get(index)) {
try (InputStream ignored =
blobStore.blobContainer(repository.basePath().add("indices").add(index)).readBlob(file)) {
} catch (NoSuchFileException e) {
try (
InputStream ignored = blobStore.blobContainer(repository.basePath().add("indices").add(index)).readBlob(file)
) {} catch (NoSuchFileException e) {
return false;
}
}
@ -314,8 +356,9 @@ public final class BlobStoreTestUtil {
public static void assertBlobsByPrefix(BlobStoreRepository repository, BlobPath path, String prefix, Map<String, BlobMetadata> blobs) {
final PlainActionFuture<Map<String, BlobMetadata>> future = PlainActionFuture.newFuture();
repository.threadPool().generic().execute(
ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).listBlobsByPrefix(prefix)));
repository.threadPool()
.generic()
.execute(ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).listBlobsByPrefix(prefix)));
Map<String, BlobMetadata> foundBlobs = future.actionGet();
if (blobs.isEmpty()) {
assertThat(foundBlobs.keySet(), empty());
@ -346,9 +389,15 @@ public final class BlobStoreTestUtil {
* @return Mock ClusterService
*/
public static ClusterService mockClusterService(RepositoryMetadata metadata) {
return mockClusterService(ClusterState.builder(ClusterState.EMPTY_STATE).metadata(
Metadata.builder().putCustom(RepositoriesMetadata.TYPE,
new RepositoriesMetadata(Collections.singletonList(metadata))).build()).build());
return mockClusterService(
ClusterState.builder(ClusterState.EMPTY_STATE)
.metadata(
Metadata.builder()
.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(metadata)))
.build()
)
.build()
);
}
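A hedged usage sketch of the helper above (not part of this commit); the three-argument (name, type, settings) RepositoryMetadata constructor is an assumption:
// Hypothetical repository description for a test.
RepositoryMetadata metadata = new RepositoryMetadata("test-repo", "fs", Settings.EMPTY);
ClusterService clusterService = BlobStoreTestUtil.mockClusterService(metadata);
// The mock reports the local node as master, so code under test may submit
// cluster state updates that touch RepositoriesMetadata.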
private static ClusterService mockClusterService(ClusterState initialState) {
@ -356,15 +405,18 @@ public final class BlobStoreTestUtil {
when(threadPool.executor(ThreadPool.Names.SNAPSHOT)).thenReturn(new SameThreadExecutorService());
when(threadPool.generic()).thenReturn(new SameThreadExecutorService());
when(threadPool.info(ThreadPool.Names.SNAPSHOT)).thenReturn(
new ThreadPool.Info(ThreadPool.Names.SNAPSHOT, ThreadPool.ThreadPoolType.FIXED, randomIntBetween(1, 10)));
new ThreadPool.Info(ThreadPool.Names.SNAPSHOT, ThreadPool.ThreadPoolType.FIXED, randomIntBetween(1, 10))
);
final ClusterService clusterService = mock(ClusterService.class);
final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class);
when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService);
// Setting local node as master so it may update the repository metadata in the cluster state
final DiscoveryNode localNode = new DiscoveryNode("", buildNewFakeTransportAddress(), Version.CURRENT);
final AtomicReference<ClusterState> currentState = new AtomicReference<>(
ClusterState.builder(initialState).nodes(
DiscoveryNodes.builder().add(localNode).masterNodeId(localNode.getId()).localNodeId(localNode.getId()).build()).build());
ClusterState.builder(initialState)
.nodes(DiscoveryNodes.builder().add(localNode).masterNodeId(localNode.getId()).localNodeId(localNode.getId()).build())
.build()
);
when(clusterService.state()).then(invocationOnMock -> currentState.get());
final List<ClusterStateApplier> appliers = new CopyOnWriteArrayList<>();
doAnswer(invocation -> {
@ -372,8 +424,9 @@ public final class BlobStoreTestUtil {
final ClusterState current = currentState.get();
final ClusterState next = task.execute(current);
currentState.set(next);
appliers.forEach(applier -> applier.applyClusterState(
new ClusterChangedEvent((String) invocation.getArguments()[0], next, current)));
appliers.forEach(
applier -> applier.applyClusterState(new ClusterChangedEvent((String) invocation.getArguments()[0], next, current))
);
task.clusterStateProcessed((String) invocation.getArguments()[0], current, next);
return null;
}).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class));

View File

@ -103,10 +103,9 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
final boolean verify = randomBoolean();
logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings);
assertAcked(client().admin().cluster().preparePutRepository(name)
.setType(repositoryType())
.setVerify(verify)
.setSettings(settings));
assertAcked(
client().admin().cluster().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings)
);
internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> {
assertThat(repositories.repository(name), notNullValue());
@ -216,8 +215,12 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
}
}
public static void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray,
boolean failIfAlreadyExists) throws IOException {
public static void writeBlob(
final BlobContainer container,
final String blobName,
final BytesArray bytesArray,
boolean failIfAlreadyExists
) throws IOException {
try (InputStream stream = bytesArray.streamInput()) {
if (randomBoolean()) {
container.writeBlob(blobName, stream, bytesArray.length(), failIfAlreadyExists);
@ -277,10 +280,12 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
protected BlobStore newBlobStore() {
final String repository = createRepository(randomName());
final BlobStoreRepository blobStoreRepository =
(BlobStoreRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repository);
final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getMasterNodeInstance(
RepositoriesService.class
).repository(repository);
return PlainActionFuture.get(
f -> blobStoreRepository.threadPool().generic().execute(ActionRunnable.supply(f, blobStoreRepository::blobStore)));
f -> blobStoreRepository.threadPool().generic().execute(ActionRunnable.supply(f, blobStoreRepository::blobStore))
);
}
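As a rough sketch of how a subclass test might combine newBlobStore() with the static writeBlob helper above (assumptions: BlobPath.cleanPath() is the empty-path factory and BlobContainer exposes blobExists):
BlobStore store = newBlobStore();
BlobContainer container = store.blobContainer(BlobPath.cleanPath().add("sketch"));
writeBlob(container, "hello-blob", new BytesArray("hello"), true); // fail if it already exists
assertTrue(container.blobExists("hello-blob"));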
public void testSnapshotAndRestore() throws Exception {
@ -297,8 +302,9 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
final String snapshotName = randomName();
logger.info("--> create snapshot {}:{}", repoName, snapshotName);
assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
.setWaitForCompletion(true).setIndices(indexNames));
assertSuccessfulSnapshot(
client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexNames)
);
List<String> deleteIndices = randomSubsetOf(randomIntBetween(0, indexCount), indexNames);
if (deleteIndices.size() > 0) {
@ -345,14 +351,17 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
logger.info("--> delete snapshot {}:{}", repoName, snapshotName);
assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get());
expectThrows(SnapshotMissingException.class, () ->
client().admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotName).get());
expectThrows(
SnapshotMissingException.class,
() -> client().admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotName).get()
);
expectThrows(SnapshotMissingException.class, () ->
client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get());
expectThrows(SnapshotMissingException.class, () -> client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get());
expectThrows(SnapshotRestoreException.class, () ->
client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get());
expectThrows(
SnapshotRestoreException.class,
() -> client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get()
);
}
public void testMultipleSnapshotAndRollback() throws Exception {
@ -382,8 +391,13 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
// Check number of documents in this iteration
docCounts[i] = (int) client().prepareSearch(indexName).setSize(0).get().getHits().getTotalHits().value;
logger.info("--> create snapshot {}:{} with {} documents", repoName, snapshotName + "-" + i, docCounts[i]);
assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName + "-" + i)
.setWaitForCompletion(true).setIndices(indexName));
assertSuccessfulSnapshot(
client().admin()
.cluster()
.prepareCreateSnapshot(repoName, snapshotName + "-" + i)
.setWaitForCompletion(true)
.setIndices(indexName)
);
}
int restoreOperations = randomIntBetween(1, 3);
@ -397,8 +411,12 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
assertAcked(client().admin().indices().prepareClose(indexName));
logger.info("--> restore index from the snapshot");
assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore)
.setWaitForCompletion(true));
assertSuccessfulRestore(
client().admin()
.cluster()
.prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore)
.setWaitForCompletion(true)
);
ensureGreen();
assertHitCount(client().prepareSearch(indexName).setSize(0).get(), docCounts[iterationToRestore]);
@ -425,8 +443,11 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
refresh();
logger.info("--> take a snapshot");
CreateSnapshotResponse createSnapshotResponse =
client.admin().cluster().prepareCreateSnapshot(repoName, "test-snap").setWaitForCompletion(true).get();
CreateSnapshotResponse createSnapshotResponse = client.admin()
.cluster()
.prepareCreateSnapshot(repoName, "test-snap")
.setWaitForCompletion(true)
.get();
assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards());
logger.info("--> indexing more data");
@ -437,10 +458,12 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
}
logger.info("--> take another snapshot with only 2 of the 3 indices");
createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, "test-snap2")
.setWaitForCompletion(true)
.setIndices("test-idx-1", "test-idx-2")
.get();
createSnapshotResponse = client.admin()
.cluster()
.prepareCreateSnapshot(repoName, "test-snap2")
.setWaitForCompletion(true)
.setIndices("test-idx-1", "test-idx-2")
.get();
assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards());
logger.info("--> delete a snapshot");
@ -471,7 +494,8 @@ public abstract class OpenSearchBlobStoreRepositoryIntegTestCase extends OpenSea
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
indexRequestBuilders[i] = client().prepareIndex(name, name, Integer.toString(i))
.setRouting(randomAlphaOfLength(randomIntBetween(1, 10))).setSource("field", "value");
.setRouting(randomAlphaOfLength(randomIntBetween(1, 10)))
.setSource("field", "value");
}
indexRandom(true, indexRequestBuilders);
}

View File

@ -81,7 +81,7 @@ import static org.hamcrest.Matchers.hasSize;
*/
@SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service")
// The tests in here do a lot of state updates and other writes to disk and are slowed down too much by WindowsFS
@LuceneTestCase.SuppressFileSystems(value = {"WindowsFS", "ExtrasFS"})
@LuceneTestCase.SuppressFileSystems(value = { "WindowsFS", "ExtrasFS" })
public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends OpenSearchBlobStoreRepositoryIntegTestCase {
/**
@ -129,15 +129,18 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
@After
public void tearDownHttpServer() {
if (handlers != null) {
for(Map.Entry<String, HttpHandler> handler : handlers.entrySet()) {
for (Map.Entry<String, HttpHandler> handler : handlers.entrySet()) {
httpServer.removeContext(handler.getKey());
HttpHandler h = handler.getValue();
while (h instanceof DelegatingHttpHandler) {
h = ((DelegatingHttpHandler) h).getDelegate();
}
if (h instanceof BlobStoreHttpHandler) {
List<String> blobs = ((BlobStoreHttpHandler) h).blobs().keySet().stream()
.filter(blob -> blob.contains("index") == false).collect(Collectors.toList());
List<String> blobs = ((BlobStoreHttpHandler) h).blobs()
.keySet()
.stream()
.filter(blob -> blob.contains("index") == false)
.collect(Collectors.toList());
assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0));
}
}
@ -154,10 +157,10 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
public void testSnapshotWithLargeSegmentFiles() throws Exception {
final String repository = createRepository(randomName());
final String index = "index-no-merges";
createIndex(index, Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.build());
createIndex(
index,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
final long nbDocs = randomLongBetween(10_000L, 20_000L);
try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) {
@ -170,8 +173,9 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs);
final String snapshot = "snapshot";
assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repository, snapshot)
.setWaitForCompletion(true).setIndices(index));
assertSuccessfulSnapshot(
client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)
);
assertAcked(client().admin().indices().prepareDelete(index));
@ -185,10 +189,10 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
public void testRequestStats() throws Exception {
final String repository = createRepository(randomName());
final String index = "index-no-merges";
createIndex(index, Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.build());
createIndex(
index,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
final long nbDocs = randomLongBetween(10_000L, 20_000L);
try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) {
@ -201,8 +205,9 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs);
final String snapshot = "snapshot";
assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repository, snapshot)
.setWaitForCompletion(true).setIndices(index));
assertSuccessfulSnapshot(
client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)
);
assertAcked(client().admin().indices().prepareDelete(index));
@ -213,26 +218,21 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get());
final RepositoryStats repositoryStats = StreamSupport.stream(
internalCluster().getInstances(RepositoriesService.class).spliterator(), false)
.map(repositoriesService -> {
try {
return repositoriesService.repository(repository);
} catch (RepositoryMissingException e) {
return null;
}
})
.filter(Objects::nonNull)
.map(Repository::stats)
.reduce(RepositoryStats::merge)
.get();
internalCluster().getInstances(RepositoriesService.class).spliterator(),
false
).map(repositoriesService -> {
try {
return repositoriesService.repository(repository);
} catch (RepositoryMissingException e) {
return null;
}
}).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get();
Map<String, Long> sdkRequestCounts = repositoryStats.requestCounts;
final Map<String, Long> mockCalls = getMockRequestCounts();
String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls",
sdkRequestCounts,
mockCalls);
String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", sdkRequestCounts, mockCalls);
assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts);
}
@ -258,7 +258,8 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
* Consumes and closes the given {@link InputStream}
*/
protected static void drainInputStream(final InputStream inputStream) throws IOException {
while (inputStream.read(BUFFER) >= 0) ;
while (inputStream.read(BUFFER) >= 0)
;
}
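Spotless leaves the loop's empty statement as a semicolon on its own line, which is easy to misread. An equivalent form with an explicit empty block (a suggestion only, not what this commit changes) would be:
protected static void drainInputStream(final InputStream inputStream) throws IOException {
    while (inputStream.read(BUFFER) >= 0) {
        // intentionally empty: the stream only needs to be fully consumed
    }
}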
/**
@ -408,8 +409,15 @@ public abstract class OpenSearchMockAPIBasedRepositoryIntegTestCase extends Open
try {
handler.handle(exchange);
} catch (Throwable t) {
logger.error(() -> new ParameterizedMessage("Exception when handling request {} {} {}",
exchange.getRemoteAddress(), exchange.getRequestMethod(), exchange.getRequestURI()), t);
logger.error(
() -> new ParameterizedMessage(
"Exception when handling request {} {} {}",
exchange.getRemoteAddress(),
exchange.getRequestMethod(),
exchange.getRequestURI()
),
t
);
throw t;
}
}

View File

@ -40,19 +40,34 @@ import java.util.function.Function;
*/
public abstract class MockDeterministicScript implements Function<Map<String, Object>, Object>, ScriptFactory {
public abstract Object apply(Map<String, Object> vars);
public abstract boolean isResultDeterministic();
public static MockDeterministicScript asDeterministic(Function<Map<String, Object>, Object> script) {
return new MockDeterministicScript() {
@Override public boolean isResultDeterministic() { return true; }
@Override public Object apply(Map<String, Object> vars) { return script.apply(vars); }
@Override
public boolean isResultDeterministic() {
return true;
}
@Override
public Object apply(Map<String, Object> vars) {
return script.apply(vars);
}
};
}
public static MockDeterministicScript asNonDeterministic(Function<Map<String, Object>, Object> script) {
return new MockDeterministicScript() {
@Override public boolean isResultDeterministic() { return false; }
@Override public Object apply(Map<String, Object> vars) { return script.apply(vars); }
@Override
public boolean isResultDeterministic() {
return false;
}
@Override
public Object apply(Map<String, Object> vars) {
return script.apply(vars);
}
};
}
}
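Illustrative usage of the two factories above (not part of this commit; the lambdas are made up):
MockDeterministicScript cached = MockDeterministicScript.asDeterministic(vars -> 42);
MockDeterministicScript live = MockDeterministicScript.asNonDeterministic(vars -> System.nanoTime());
assert cached.isResultDeterministic();        // result may be cached
assert live.isResultDeterministic() == false; // must be re-executed every time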

View File

@ -80,14 +80,20 @@ public class MockScriptEngine implements ScriptEngine {
private final Map<String, MockDeterministicScript> scripts;
private final Map<ScriptContext<?>, ContextCompiler> contexts;
public MockScriptEngine(String type, Map<String, Function<Map<String, Object>, Object>> scripts,
Map<ScriptContext<?>, ContextCompiler> contexts) {
public MockScriptEngine(
String type,
Map<String, Function<Map<String, Object>, Object>> scripts,
Map<ScriptContext<?>, ContextCompiler> contexts
) {
this(type, scripts, Collections.emptyMap(), contexts);
}
public MockScriptEngine(String type, Map<String, Function<Map<String, Object>, Object>> deterministicScripts,
Map<String, Function<Map<String, Object>, Object>> nonDeterministicScripts,
Map<ScriptContext<?>, ContextCompiler> contexts) {
public MockScriptEngine(
String type,
Map<String, Function<Map<String, Object>, Object>> deterministicScripts,
Map<String, Function<Map<String, Object>, Object>> nonDeterministicScripts,
Map<ScriptContext<?>, ContextCompiler> contexts
) {
Map<String, MockDeterministicScript> scripts = new HashMap<>(deterministicScripts.size() + nonDeterministicScripts.size());
deterministicScripts.forEach((key, value) -> scripts.put(key, MockDeterministicScript.asDeterministic(value)));
@ -113,15 +119,24 @@ public class MockScriptEngine implements ScriptEngine {
// source is always provided. For stored and file scripts, the source of the script must match the key of a predefined script.
MockDeterministicScript script = scripts.get(source);
if (script == null) {
throw new IllegalArgumentException("No pre defined script matching [" + source + "] for script with name [" + name + "], " +
"did you declare the mocked script?");
throw new IllegalArgumentException(
"No pre defined script matching ["
+ source
+ "] for script with name ["
+ name
+ "], "
+ "did you declare the mocked script?"
);
}
MockCompiledScript mockCompiled = new MockCompiledScript(name, params, source, script);
if (context.instanceClazz.equals(FieldScript.class)) {
return context.factoryClazz.cast(new MockFieldScriptFactory(script));
} else if(context.instanceClazz.equals(TermsSetQueryScript.class)) {
TermsSetQueryScript.Factory factory = (parameters, lookup) -> (TermsSetQueryScript.LeafFactory) ctx
-> new TermsSetQueryScript(parameters, lookup, ctx) {
} else if (context.instanceClazz.equals(TermsSetQueryScript.class)) {
TermsSetQueryScript.Factory factory = (parameters, lookup) -> (TermsSetQueryScript.LeafFactory) ctx -> new TermsSetQueryScript(
parameters,
lookup,
ctx
) {
@Override
public Number execute() {
Map<String, Object> vars = new HashMap<>(parameters);
@ -162,7 +177,7 @@ public class MockScriptEngine implements ScriptEngine {
}
};
return context.factoryClazz.cast(factory);
} else if(context.instanceClazz.equals(AggregationScript.class)) {
} else if (context.instanceClazz.equals(AggregationScript.class)) {
return context.factoryClazz.cast(new MockAggregationScript(script));
} else if (context.instanceClazz.equals(IngestConditionalScript.class)) {
IngestConditionalScript.Factory factory = parameters -> new IngestConditionalScript(parameters) {
@ -411,8 +426,15 @@ public class MockScriptEngine implements ScriptEngine {
public static class MockMetricAggInitScriptFactory implements ScriptedMetricAggContexts.InitScript.Factory {
private final MockDeterministicScript script;
MockMetricAggInitScriptFactory(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockMetricAggInitScriptFactory(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public ScriptedMetricAggContexts.InitScript newInstance(Map<String, Object> params, Map<String, Object> state) {
@ -423,8 +445,7 @@ public class MockScriptEngine implements ScriptEngine {
public static class MockMetricAggInitScript extends ScriptedMetricAggContexts.InitScript {
private final Function<Map<String, Object>, Object> script;
MockMetricAggInitScript(Map<String, Object> params, Map<String, Object> state,
Function<Map<String, Object>, Object> script) {
MockMetricAggInitScript(Map<String, Object> params, Map<String, Object> state, Function<Map<String, Object>, Object> script) {
super(params, state);
this.script = script;
}
@ -442,14 +463,24 @@ public class MockScriptEngine implements ScriptEngine {
}
}
public static class MockMetricAggMapScriptFactory implements ScriptedMetricAggContexts.MapScript.Factory {
public static class MockMetricAggMapScriptFactory implements ScriptedMetricAggContexts.MapScript.Factory {
private final MockDeterministicScript script;
MockMetricAggMapScriptFactory(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockMetricAggMapScriptFactory(MockDeterministicScript script) {
this.script = script;
}
@Override
public ScriptedMetricAggContexts.MapScript.LeafFactory newFactory(Map<String, Object> params, Map<String, Object> state,
SearchLookup lookup) {
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public ScriptedMetricAggContexts.MapScript.LeafFactory newFactory(
Map<String, Object> params,
Map<String, Object> state,
SearchLookup lookup
) {
return new MockMetricAggMapScript(params, state, lookup, script);
}
}
@ -460,8 +491,12 @@ public class MockScriptEngine implements ScriptEngine {
private final SearchLookup lookup;
private final Function<Map<String, Object>, Object> script;
MockMetricAggMapScript(Map<String, Object> params, Map<String, Object> state, SearchLookup lookup,
Function<Map<String, Object>, Object> script) {
MockMetricAggMapScript(
Map<String, Object> params,
Map<String, Object> state,
SearchLookup lookup,
Function<Map<String, Object>, Object> script
) {
this.params = params;
this.state = state;
this.lookup = lookup;
@ -492,8 +527,15 @@ public class MockScriptEngine implements ScriptEngine {
public static class MockMetricAggCombineScriptFactory implements ScriptedMetricAggContexts.CombineScript.Factory {
private final MockDeterministicScript script;
MockMetricAggCombineScriptFactory(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockMetricAggCombineScriptFactory(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public ScriptedMetricAggContexts.CombineScript newInstance(Map<String, Object> params, Map<String, Object> state) {
@ -524,8 +566,15 @@ public class MockScriptEngine implements ScriptEngine {
public static class MockMetricAggReduceScriptFactory implements ScriptedMetricAggContexts.ReduceScript.Factory {
private final MockDeterministicScript script;
MockMetricAggReduceScriptFactory(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockMetricAggReduceScriptFactory(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public ScriptedMetricAggContexts.ReduceScript newInstance(Map<String, Object> params, List<Object> states) {
@ -605,8 +654,15 @@ public class MockScriptEngine implements ScriptEngine {
class MockAggregationScript implements AggregationScript.Factory {
private final MockDeterministicScript script;
MockAggregationScript(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockAggregationScript(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public AggregationScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup) {
@ -636,8 +692,15 @@ public class MockScriptEngine implements ScriptEngine {
class MockSignificantTermsHeuristicScoreScript implements SignificantTermsHeuristicScoreScript.Factory {
private final MockDeterministicScript script;
MockSignificantTermsHeuristicScoreScript(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockSignificantTermsHeuristicScoreScript(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public SignificantTermsHeuristicScoreScript newInstance() {
@ -652,8 +715,15 @@ public class MockScriptEngine implements ScriptEngine {
class MockFieldScriptFactory implements FieldScript.Factory {
private final MockDeterministicScript script;
MockFieldScriptFactory(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockFieldScriptFactory(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public FieldScript.LeafFactory newFactory(Map<String, Object> parameters, SearchLookup lookup) {
@ -671,8 +741,15 @@ public class MockScriptEngine implements ScriptEngine {
class MockStringSortScriptFactory implements StringSortScript.Factory {
private final MockDeterministicScript script;
MockStringSortScriptFactory(MockDeterministicScript script) { this.script = script; }
@Override public boolean isResultDeterministic() { return script.isResultDeterministic(); }
MockStringSortScriptFactory(MockDeterministicScript script) {
this.script = script;
}
@Override
public boolean isResultDeterministic() {
return script.isResultDeterministic();
}
@Override
public StringSortScript.LeafFactory newFactory(Map<String, Object> parameters, SearchLookup lookup) {

View File

@ -55,7 +55,9 @@ public abstract class MockScriptPlugin extends Plugin implements ScriptPlugin {
protected abstract Map<String, Function<Map<String, Object>, Object>> pluginScripts();
protected Map<String, Function<Map<String, Object>, Object>> nonDeterministicPluginScripts() { return Collections.emptyMap(); }
protected Map<String, Function<Map<String, Object>, Object>> nonDeterministicPluginScripts() {
return Collections.emptyMap();
}
protected Map<ScriptContext<?>, MockScriptEngine.ContextCompiler> pluginContextCompilers() {
return Collections.emptyMap();

View File

@ -56,8 +56,11 @@ public class MockScriptService extends ScriptService {
return false;
}
public static <T> MockScriptService singleContext(ScriptContext<T> context, Function<String, T> compile,
Map<String, StoredScriptSource> storedLookup) {
public static <T> MockScriptService singleContext(
ScriptContext<T> context,
Function<String, T> compile,
Map<String, StoredScriptSource> storedLookup
) {
ScriptEngine engine = new ScriptEngine() {
@Override
public String getType() {
@ -65,8 +68,12 @@ public class MockScriptService extends ScriptService {
}
@Override
public <FactoryType> FactoryType compile(String name, String code, ScriptContext<FactoryType> context,
Map<String, String> params) {
public <FactoryType> FactoryType compile(
String name,
String code,
ScriptContext<FactoryType> context,
Map<String, String> params
) {
return context.factoryClazz.cast(compile.apply(code));
}
@ -75,8 +82,11 @@ public class MockScriptService extends ScriptService {
return org.opensearch.common.collect.Set.of(context);
}
};
return new MockScriptService(Settings.EMPTY, org.opensearch.common.collect.Map.of("lang", engine),
org.opensearch.common.collect.Map.of(context.name, context)) {
return new MockScriptService(
Settings.EMPTY,
org.opensearch.common.collect.Map.of("lang", engine),
org.opensearch.common.collect.Map.of(context.name, context)
) {
@Override
protected StoredScriptSource getScriptFromClusterState(String id) {
return storedLookup.get(id);

View File

@ -61,12 +61,12 @@ public final class ScoreAccessor extends Number {
@Override
public int intValue() {
return (int)score();
return (int) score();
}
@Override
public long longValue() {
return (long)score();
return (long) score();
}
@Override

View File

@ -63,9 +63,11 @@ public class MockSearchService extends SearchService {
final Map<ReaderContext, Throwable> copy = new HashMap<>(ACTIVE_SEARCH_CONTEXTS);
if (copy.isEmpty() == false) {
throw new AssertionError(
"There are still [" + copy.size()
+ "] in-flight contexts. The first one's creation site is listed as the cause of this exception.",
copy.values().iterator().next());
"There are still ["
+ copy.size()
+ "] in-flight contexts. The first one's creation site is listed as the cause of this exception.",
copy.values().iterator().next()
);
}
}
@ -83,9 +85,15 @@ public class MockSearchService extends SearchService {
ACTIVE_SEARCH_CONTEXTS.remove(context);
}
public MockSearchService(ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService,
BigArrays bigArrays, FetchPhase fetchPhase, CircuitBreakerService circuitBreakerService) {
public MockSearchService(
ClusterService clusterService,
IndicesService indicesService,
ThreadPool threadPool,
ScriptService scriptService,
BigArrays bigArrays,
FetchPhase fetchPhase,
CircuitBreakerService circuitBreakerService
) {
super(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase, null, circuitBreakerService);
}
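The in-flight bookkeeping above is normally checked from test teardown. A sketch of such a hook follows; the method name assertNoInFlightContext is inferred from the assertion body earlier in this file and should be treated as an assumption:
@After
public void verifySearchContextsReleased() {
    // Throws an AssertionError whose cause points at the leaked context's creation site.
    MockSearchService.assertNoInFlightContext();
}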

View File

@ -131,18 +131,18 @@ public class RandomSearchRequestGenerator {
searchRequest.source(randomSearchSourceBuilder.get());
}
if (randomBoolean()) {
searchRequest.setCancelAfterTimeInterval(
TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval"));
searchRequest.setCancelAfterTimeInterval(TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval"));
}
return searchRequest;
}
public static SearchSourceBuilder randomSearchSourceBuilder(
Supplier<HighlightBuilder> randomHighlightBuilder,
Supplier<SuggestBuilder> randomSuggestBuilder,
Supplier<RescorerBuilder<?>> randomRescoreBuilder,
Supplier<List<SearchExtBuilder>> randomExtBuilders,
Supplier<CollapseBuilder> randomCollapseBuilder) {
Supplier<HighlightBuilder> randomHighlightBuilder,
Supplier<SuggestBuilder> randomSuggestBuilder,
Supplier<RescorerBuilder<?>> randomRescoreBuilder,
Supplier<List<SearchExtBuilder>> randomExtBuilders,
Supplier<CollapseBuilder> randomCollapseBuilder
) {
SearchSourceBuilder builder = new SearchSourceBuilder();
if (randomBoolean()) {
builder.from(randomIntBetween(0, 10000));
@ -181,7 +181,7 @@ public class RandomSearchRequestGenerator {
}
}
switch(randomInt(2)) {
switch (randomInt(2)) {
case 0:
builder.storedFields();
break;
@ -236,8 +236,11 @@ public class RandomSearchRequestGenerator {
fetchSourceContext = new FetchSourceContext(true, includes, excludes);
break;
case 2:
fetchSourceContext = new FetchSourceContext(true, new String[]{randomAlphaOfLengthBetween(5, 20)},
new String[]{randomAlphaOfLengthBetween(5, 20)});
fetchSourceContext = new FetchSourceContext(
true,
new String[] { randomAlphaOfLengthBetween(5, 20) },
new String[] { randomAlphaOfLengthBetween(5, 20) }
);
break;
case 3:
fetchSourceContext = new FetchSourceContext(true, includes, excludes);
@ -246,7 +249,7 @@ public class RandomSearchRequestGenerator {
fetchSourceContext = new FetchSourceContext(true, includes, null);
break;
case 5:
fetchSourceContext = new FetchSourceContext(true, new String[] {randomAlphaOfLengthBetween(5, 20)}, null);
fetchSourceContext = new FetchSourceContext(true, new String[] { randomAlphaOfLengthBetween(5, 20) }, null);
break;
default:
throw new IllegalStateException();
@ -282,18 +285,21 @@ public class RandomSearchRequestGenerator {
builder.sort(SortBuilders.fieldSort(randomAlphaOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
break;
case 1:
builder.sort(SortBuilders.geoDistanceSort(randomAlphaOfLengthBetween(5, 20),
AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
builder.sort(
SortBuilders.geoDistanceSort(randomAlphaOfLengthBetween(5, 20), AbstractQueryTestCase.randomGeohash(1, 12))
.order(randomFrom(SortOrder.values()))
);
break;
case 2:
builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values())));
break;
case 3:
builder.sort(SortBuilders
.scriptSort(
new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "foo", emptyMap()),
ScriptSortBuilder.ScriptSortType.NUMBER)
.order(randomFrom(SortOrder.values())));
builder.sort(
SortBuilders.scriptSort(
new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "foo", emptyMap()),
ScriptSortBuilder.ScriptSortType.NUMBER
).order(randomFrom(SortOrder.values()))
);
break;
case 4:
builder.sort(randomAlphaOfLengthBetween(5, 20));
@ -350,8 +356,11 @@ public class RandomSearchRequestGenerator {
jsonBuilder.endArray();
jsonBuilder.endObject();
XContentParser parser = XContentFactory.xContent(XContentType.JSON)
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(jsonBuilder).streamInput());
.createParser(
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(jsonBuilder).streamInput()
);
parser.nextToken();
parser.nextToken();
parser.nextToken();
@ -381,7 +390,7 @@ public class RandomSearchRequestGenerator {
if (randomBoolean()) {
String field = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 20);
int max = between(2, 1000);
int id = randomInt(max-1);
int id = randomInt(max - 1);
if (field == null) {
builder.slice(new SliceBuilder(id, max));
} else {

View File

@ -195,8 +195,11 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
return Collections.emptyMap();
}
private static void registerFieldTypes(SearchContext searchContext, MapperService mapperService,
Map<String, MappedFieldType> fieldNameToType) {
private static void registerFieldTypes(
SearchContext searchContext,
MapperService mapperService,
Map<String, MappedFieldType> fieldNameToType
) {
for (Map.Entry<String, MappedFieldType> entry : fieldNameToType.entrySet()) {
String fieldName = entry.getKey();
MappedFieldType fieldType = entry.getValue();
@ -222,43 +225,65 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
return Collections.emptyList();
}
protected <A extends Aggregator> A createAggregator(AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
MappedFieldType... fieldTypes) throws IOException {
return createAggregator(aggregationBuilder, indexSearcher, createIndexSettings(),
new MultiBucketConsumer(DEFAULT_MAX_BUCKETS, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)), fieldTypes);
protected <A extends Aggregator> A createAggregator(
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
MappedFieldType... fieldTypes
) throws IOException {
return createAggregator(
aggregationBuilder,
indexSearcher,
createIndexSettings(),
new MultiBucketConsumer(DEFAULT_MAX_BUCKETS, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)),
fieldTypes
);
}
protected <A extends Aggregator> A createAggregator(Query query,
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
IndexSettings indexSettings,
MappedFieldType... fieldTypes) throws IOException {
return createAggregator(query, aggregationBuilder, indexSearcher, indexSettings,
new MultiBucketConsumer(DEFAULT_MAX_BUCKETS, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)), fieldTypes);
protected <A extends Aggregator> A createAggregator(
Query query,
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
IndexSettings indexSettings,
MappedFieldType... fieldTypes
) throws IOException {
return createAggregator(
query,
aggregationBuilder,
indexSearcher,
indexSettings,
new MultiBucketConsumer(DEFAULT_MAX_BUCKETS, new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)),
fieldTypes
);
}
protected <A extends Aggregator> A createAggregator(Query query, AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends Aggregator> A createAggregator(
Query query,
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes
) throws IOException {
return createAggregator(query, aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes);
}
protected <A extends Aggregator> A createAggregator(AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
IndexSettings indexSettings,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends Aggregator> A createAggregator(
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
IndexSettings indexSettings,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes
) throws IOException {
return createAggregator(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes);
}
protected <A extends Aggregator> A createAggregator(Query query,
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
IndexSettings indexSettings,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends Aggregator> A createAggregator(
Query query,
AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
IndexSettings indexSettings,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes
) throws IOException {
SearchContext searchContext = createSearchContext(indexSearcher, indexSettings, query, bucketConsumer, fieldTypes);
return createAggregator(aggregationBuilder, searchContext);
}
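A hedged sketch of how a subclass typically exercises these overloads end to end through searchAndReduce (declared further down in this class). The field name, the single document, and the NumberFieldType/MaxAggregationBuilder APIs are illustrative assumptions:
try (Directory directory = newDirectory()) {
    RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
    writer.addDocument(Collections.singleton(new SortedNumericDocValuesField("number", 7)));
    writer.close();
    try (IndexReader reader = DirectoryReader.open(directory)) {
        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.LONG);
        InternalMax max = searchAndReduce(
            newIndexSearcher(reader),
            new MatchAllDocsQuery(),
            new MaxAggregationBuilder("max").field("number"),
            fieldType
        );
        assertEquals(7, max.getValue(), 0d);
    }
}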
@ -275,25 +300,28 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
/**
* Create a {@linkplain SearchContext} for testing an {@link Aggregator}.
*/
protected SearchContext createSearchContext(IndexSearcher indexSearcher,
IndexSettings indexSettings,
Query query,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes) throws IOException {
protected SearchContext createSearchContext(
IndexSearcher indexSearcher,
IndexSettings indexSettings,
Query query,
MultiBucketConsumer bucketConsumer,
MappedFieldType... fieldTypes
) throws IOException {
return createSearchContext(indexSearcher, indexSettings, query, bucketConsumer, new NoneCircuitBreakerService(), fieldTypes);
}
protected SearchContext createSearchContext(IndexSearcher indexSearcher,
IndexSettings indexSettings,
Query query,
MultiBucketConsumer bucketConsumer,
CircuitBreakerService circuitBreakerService,
MappedFieldType... fieldTypes) throws IOException {
protected SearchContext createSearchContext(
IndexSearcher indexSearcher,
IndexSettings indexSettings,
Query query,
MultiBucketConsumer bucketConsumer,
CircuitBreakerService circuitBreakerService,
MappedFieldType... fieldTypes
) throws IOException {
QueryCache queryCache = new DisabledQueryCache(indexSettings);
QueryCachingPolicy queryCachingPolicy = new QueryCachingPolicy() {
@Override
public void onUse(Query query) {
}
public void onUse(Query query) {}
@Override
public boolean shouldCache(Query query) {
@ -301,20 +329,23 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
return false;
}
};
ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher(indexSearcher.getIndexReader(),
indexSearcher.getSimilarity(), queryCache, queryCachingPolicy, false);
ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher(
indexSearcher.getIndexReader(),
indexSearcher.getSimilarity(),
queryCache,
queryCachingPolicy,
false
);
SearchContext searchContext = mock(SearchContext.class);
when(searchContext.numberOfShards()).thenReturn(1);
when(searchContext.searcher()).thenReturn(contextIndexSearcher);
when(searchContext.fetchPhase())
.thenReturn(new FetchPhase(Arrays.asList(new FetchSourcePhase(), new FetchDocValuesPhase())));
when(searchContext.fetchPhase()).thenReturn(new FetchPhase(Arrays.asList(new FetchSourcePhase(), new FetchDocValuesPhase())));
when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class)));
IndexShard indexShard = mock(IndexShard.class);
when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0));
when(searchContext.indexShard()).thenReturn(indexShard);
when(searchContext.aggregations())
.thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer));
when(searchContext.aggregations()).thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer));
when(searchContext.query()).thenReturn(query);
/*
* Always use the circuit breaking big arrays instance so that the CircuitBreakerService
@ -328,11 +359,20 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
when(mapperService.getIndexSettings()).thenReturn(indexSettings);
when(mapperService.hasNested()).thenReturn(false);
when(searchContext.mapperService()).thenReturn(mapperService);
IndexFieldDataService ifds = new IndexFieldDataService(indexSettings,
IndexFieldDataService ifds = new IndexFieldDataService(
indexSettings,
new IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() {
}), circuitBreakerService, mapperService);
QueryShardContext queryShardContext =
queryShardContextMock(contextIndexSearcher, mapperService, indexSettings, circuitBreakerService, bigArrays);
}),
circuitBreakerService,
mapperService
);
QueryShardContext queryShardContext = queryShardContextMock(
contextIndexSearcher,
mapperService,
indexSettings,
circuitBreakerService,
bigArrays
);
when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
when(queryShardContext.getObjectMapper(anyString())).thenAnswer(invocation -> {
String fieldName = (String) invocation.getArguments()[0];
@ -344,13 +384,12 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
});
Map<String, MappedFieldType> fieldNameToType = new HashMap<>();
fieldNameToType.putAll(Arrays.stream(fieldTypes)
.filter(Objects::nonNull)
.collect(Collectors.toMap(MappedFieldType::name, Function.identity())));
fieldNameToType.putAll(
Arrays.stream(fieldTypes).filter(Objects::nonNull).collect(Collectors.toMap(MappedFieldType::name, Function.identity()))
);
fieldNameToType.putAll(getFieldAliases(fieldTypes));
registerFieldTypes(searchContext, mapperService,
fieldNameToType);
registerFieldTypes(searchContext, mapperService, fieldNameToType);
doAnswer(invocation -> {
/* Store the release-ables so we can release them at the end of the test case. This is important because aggregations don't
* close their sub-aggregations. This is fairly similar to what the production code does. */
@ -362,12 +401,13 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
protected IndexSettings createIndexSettings() {
return new IndexSettings(
IndexMetadata.builder("_index").settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(System.currentTimeMillis())
.build(),
Settings.EMPTY
IndexMetadata.builder("_index")
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(System.currentTimeMillis())
.build(),
Settings.EMPTY
);
}
@ -381,27 +421,46 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
/**
* sub-tests that need a more complex mock can overwrite this
*/
protected QueryShardContext queryShardContextMock(IndexSearcher searcher,
MapperService mapperService,
IndexSettings indexSettings,
CircuitBreakerService circuitBreakerService,
BigArrays bigArrays) {
protected QueryShardContext queryShardContextMock(
IndexSearcher searcher,
MapperService mapperService,
IndexSettings indexSettings,
CircuitBreakerService circuitBreakerService,
BigArrays bigArrays
) {
return new QueryShardContext(0, indexSettings, bigArrays, null,
return new QueryShardContext(
0,
indexSettings,
bigArrays,
null,
getIndexFieldDataLookup(mapperService, circuitBreakerService),
mapperService, null, getMockScriptService(), xContentRegistry(),
writableRegistry(), null, searcher, System::currentTimeMillis, null, null, () -> true,
valuesSourceRegistry);
mapperService,
null,
getMockScriptService(),
xContentRegistry(),
writableRegistry(),
null,
searcher,
System::currentTimeMillis,
null,
null,
() -> true,
valuesSourceRegistry
);
}
/**
* Sub-tests that need a more complex index field data provider can override this
*/
protected TriFunction<MappedFieldType, String, Supplier<SearchLookup>, IndexFieldData<?>> getIndexFieldDataLookup(
MapperService mapperService, CircuitBreakerService circuitBreakerService) {
MapperService mapperService,
CircuitBreakerService circuitBreakerService
) {
return (fieldType, s, searchLookup) -> fieldType.fielddataBuilder(
mapperService.getIndexSettings().getIndex().getName(), searchLookup)
.build(new IndexFieldDataCache.None(), circuitBreakerService);
mapperService.getIndexSettings().getIndex().getName(),
searchLookup
).build(new IndexFieldDataCache.None(), circuitBreakerService);
}
/**
@ -411,26 +470,32 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
return null;
}
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSearcher searcher,
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(
IndexSearcher searcher,
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes
) throws IOException {
return searchAndReduce(createIndexSettings(), searcher, query, builder, DEFAULT_MAX_BUCKETS, fieldTypes);
}
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSettings indexSettings,
IndexSearcher searcher,
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(
IndexSettings indexSettings,
IndexSearcher searcher,
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes
) throws IOException {
return searchAndReduce(indexSettings, searcher, query, builder, DEFAULT_MAX_BUCKETS, fieldTypes);
}
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSearcher searcher,
Query query,
AggregationBuilder builder,
int maxBucket,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(
IndexSearcher searcher,
Query query,
AggregationBuilder builder,
int maxBucket,
MappedFieldType... fieldTypes
) throws IOException {
return searchAndReduce(createIndexSettings(), searcher, query, builder, maxBucket, fieldTypes);
}
@ -442,18 +507,22 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
* results together. The other half the time it aggregates across the entire
* index at once and runs a final reduction on the single resulting agg.
*/
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSettings indexSettings,
IndexSearcher searcher,
Query query,
AggregationBuilder builder,
int maxBucket,
MappedFieldType... fieldTypes) throws IOException {
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(
IndexSettings indexSettings,
IndexSearcher searcher,
Query query,
AggregationBuilder builder,
int maxBucket,
MappedFieldType... fieldTypes
) throws IOException {
final IndexReaderContext ctx = searcher.getTopReaderContext();
final PipelineTree pipelines = builder.buildPipelineTree();
List<InternalAggregation> aggs = new ArrayList<>();
Query rewritten = searcher.rewrite(query);
MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket,
new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(
maxBucket,
new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
);
C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes);
if (randomBoolean() && searcher.getIndexReader().leaves().size() > 0) {
@ -466,8 +535,10 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
}
for (ShardSearcher subSearcher : subSearchers) {
MultiBucketConsumer shardBucketConsumer = new MultiBucketConsumer(maxBucket,
new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
MultiBucketConsumer shardBucketConsumer = new MultiBucketConsumer(
maxBucket,
new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
);
C a = createAggregator(query, builder, subSearcher, indexSettings, shardBucketConsumer, fieldTypes);
a.preCollection();
Weight weight = subSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f);
@ -489,17 +560,26 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
int r = randomIntBetween(1, toReduceSize);
List<InternalAggregation> toReduce = aggs.subList(0, r);
InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forPartialReduction(
root.context().bigArrays(), getMockScriptService(), () -> PipelineAggregator.PipelineTree.EMPTY);
root.context().bigArrays(),
getMockScriptService(),
() -> PipelineAggregator.PipelineTree.EMPTY
);
A reduced = (A) aggs.get(0).reduce(toReduce, context);
aggs = new ArrayList<>(aggs.subList(r, toReduceSize));
aggs.add(reduced);
}
// now do the final reduce
MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumer(maxBucket,
new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST));
MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumer(
maxBucket,
new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
);
InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction(
root.context().bigArrays(), getMockScriptService(), reduceBucketConsumer, pipelines);
root.context().bigArrays(),
getMockScriptService(),
reduceBucketConsumer,
pipelines
);
@SuppressWarnings("unchecked")
A internalAgg = (A) aggs.get(0).reduce(aggs, context);
@ -524,14 +604,14 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
Query query,
CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
Consumer<V> verify,
MappedFieldType... fieldTypes) throws IOException {
MappedFieldType... fieldTypes
) throws IOException {
try (Directory directory = newDirectory()) {
RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
buildIndex.accept(indexWriter);
indexWriter.close();
try (DirectoryReader unwrapped = DirectoryReader.open(directory);
IndexReader indexReader = wrapDirectoryReader(unwrapped)) {
try (DirectoryReader unwrapped = DirectoryReader.open(directory); IndexReader indexReader = wrapDirectoryReader(unwrapped)) {
IndexSearcher indexSearcher = newIndexSearcher(indexReader);
V agg = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldTypes);
@ -627,8 +707,9 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
* @return an aggregation builder to test against the field
*/
protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) {
throw new UnsupportedOperationException("If getSupportedValuesSourceTypes() is implemented, " +
"createAggBuilderForTypeTest() must be implemented as well.");
throw new UnsupportedOperationException(
"If getSupportedValuesSourceTypes() is implemented, " + "createAggBuilderForTypeTest() must be implemented as well."
);
}
/**
@ -704,14 +785,26 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
try {
searchAndReduce(indexSearcher, new MatchAllDocsQuery(), aggregationBuilder, fieldType);
if (supportedVSTypes.contains(vst) == false || unsupportedMappedFieldTypes.contains(fieldType.typeName())) {
failure = new AssertionError("Aggregator [" + aggregationBuilder.getType() + "] should not support field type ["
+ fieldType.typeName() + "] but executing against the field did not throw an exception");
failure = new AssertionError(
"Aggregator ["
+ aggregationBuilder.getType()
+ "] should not support field type ["
+ fieldType.typeName()
+ "] but executing against the field did not throw an exception"
);
}
} catch (Exception | AssertionError e) {
if (supportedVSTypes.contains(vst) && unsupportedMappedFieldTypes.contains(fieldType.typeName()) == false) {
failure = new AssertionError("Aggregator [" + aggregationBuilder.getType() + "] supports field type ["
+ fieldType.typeName() + "] but executing against the field threw an exception: [" + e.getMessage() + "]",
e);
failure = new AssertionError(
"Aggregator ["
+ aggregationBuilder.getType()
+ "] supports field type ["
+ fieldType.typeName()
+ "] but executing against the field threw an exception: ["
+ e.getMessage()
+ "]",
e
);
}
}
if (failure != null) {
@ -723,9 +816,7 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
}
private ValuesSourceType fieldToVST(MappedFieldType fieldType) {
return fieldType.fielddataBuilder("", () -> {
throw new UnsupportedOperationException();
}).build(null, null).getValuesSourceType();
return fieldType.fielddataBuilder("", () -> { throw new UnsupportedOperationException(); }).build(null, null).getValuesSourceType();
}
/**
@ -758,8 +849,8 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
json = "{ \"" + fieldName + "\" : \"" + f + "\" }";
} else {
// smallest numeric is a byte so we select the smallest
v = Math.abs(randomByte());
json = "{ \"" + fieldName + "\" : \"" + v + "\" }";
}
doc.add(new SortedNumericDocValuesField(fieldName, v));
@ -783,9 +874,9 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
doc.add(new SortedNumericDocValuesField(fieldName, v));
json = "{ \"" + fieldName + "\" : \"" + (v == 0 ? "false" : "true") + "\" }";
} else if (vst.equals(CoreValuesSourceType.IP)) {
InetAddress ip = randomIp(randomBoolean());
json = "{ \"" + fieldName + "\" : \"" + NetworkAddress.format(ip) + "\" }";
doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(InetAddressPoint.encode(ip))));
} else if (vst.equals(CoreValuesSourceType.RANGE)) {
Object start;
Object end;
@ -822,11 +913,17 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
final RangeFieldMapper.Range range = new RangeFieldMapper.Range(rangeType, start, end, true, true);
doc.add(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(range))));
json = "{ \"" + fieldName + "\" : { \n" +
" \"gte\" : \"" + start + "\",\n" +
" \"lte\" : \"" + end + "\"\n" +
" }}";
} else if (vst.equals(CoreValuesSourceType.GEOPOINT)) {
json = "{ \""
+ fieldName
+ "\" : { \n"
+ " \"gte\" : \""
+ start
+ "\",\n"
+ " \"lte\" : \""
+ end
+ "\"\n"
+ " }}";
} else if (vst.equals(CoreValuesSourceType.GEOPOINT)) {
double lat = randomDouble();
double lon = randomDouble();
doc.add(new LatLonDocValuesField(fieldName, lat, lon));
@ -850,8 +947,11 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
@Override
public IndexAnalyzers getIndexAnalyzers() {
NamedAnalyzer defaultAnalyzer = new NamedAnalyzer(AnalysisRegistry.DEFAULT_ANALYZER_NAME,
AnalyzerScope.GLOBAL, new StandardAnalyzer());
NamedAnalyzer defaultAnalyzer = new NamedAnalyzer(
AnalysisRegistry.DEFAULT_ANALYZER_NAME,
AnalyzerScope.GLOBAL,
new StandardAnalyzer()
);
return new IndexAnalyzers(singletonMap(AnalysisRegistry.DEFAULT_ANALYZER_NAME, defaultAnalyzer), emptyMap(), emptyMap());
}
}
@ -920,8 +1020,7 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
return new AggCardinalityAggregationBuilder(name);
}
private static class AggCardinalityAggregationBuilder
extends AbstractAggregationBuilder<AggCardinalityAggregationBuilder> {
private static class AggCardinalityAggregationBuilder extends AbstractAggregationBuilder<AggCardinalityAggregationBuilder> {
AggCardinalityAggregationBuilder(String name) {
super(name);
@ -929,7 +1028,7 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
@Override
protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, Builder subfactoriesBuilder)
throws IOException {
return new AggregatorFactory(name, queryShardContext, parent, subfactoriesBuilder, metadata) {
@Override
protected Aggregator createInternal(
@ -1000,9 +1099,7 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
@Override
public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
aggregations.forEach(ia -> {
assertThat(((InternalAggCardinality) ia).cardinality, equalTo(cardinality));
});
aggregations.forEach(ia -> { assertThat(((InternalAggCardinality) ia).cardinality, equalTo(cardinality)); });
return new InternalAggCardinality(name, cardinality, metadata);
}
@ -1035,8 +1132,9 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
private static class AggCardinalityPlugin implements SearchPlugin {
@Override
public List<AggregationSpec> getAggregations() {
return singletonList(new AggregationSpec("agg_cardinality", in -> null,
(ContextParser<String, AggCardinalityAggregationBuilder>) (p, c) -> null));
return singletonList(
new AggregationSpec("agg_cardinality", in -> null, (ContextParser<String, AggCardinalityAggregationBuilder>) (p, c) -> null)
);
}
}
}
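A minimal sketch of the createAggBuilderForTypeTest() contract shown above, using a hypothetical subclass; ValueCountAggregationBuilder is chosen purely for illustration and is not part of this change:

    import org.opensearch.index.mapper.MappedFieldType;
    import org.opensearch.search.aggregations.AggregationBuilder;
    import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder;

    public class MyValueCountAggregatorTests extends AggregatorTestCase {
        // Supplies the builder that the type-compatibility test runs against each
        // field produced from getSupportedValuesSourceTypes().
        @Override
        protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) {
            return new ValueCountAggregationBuilder("type_test").field(fieldName);
        }
    }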


@ -215,18 +215,18 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
protected void randomFieldOrScript(ValuesSourceAggregationBuilder<?> factory, String field) {
int choice = randomInt(2);
switch (choice) {
case 0:
factory.field(field);
break;
case 1:
factory.field(field);
factory.script(mockScript("_value + 1"));
break;
case 2:
factory.script(mockScript("doc[" + field + "] + 1"));
break;
default:
throw new AssertionError("Unknown random operation [" + choice + "]");
}
}
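In effect, randomFieldOrScript(...) exercises the three supported values-source configurations at random: a plain field (case 0), a field combined with a value script (case 1), and a standalone document-level script (case 2).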


@ -102,7 +102,7 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
List<NamedXContentRegistry.Entry> xContentEntries = searchModule.getNamedXContents();
xContentEntries.addAll(additionalNamedContents());
xContentRegistry = new NamedXContentRegistry(xContentEntries);
//create some random type with some default field, those types will stick around for all of the subclasses
// create some random type with some default field, those types will stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)];
for (int i = 0; i < currentTypes.length; i++) {
String type = randomAlphaOfLengthBetween(1, 10);
@ -183,7 +183,6 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
}
}
public void testEqualsAndHashcode() throws IOException {
// TODO we only change name and boost, we should extend by any sub-test supplying a "mutate" method that randomly changes one
// aspect of the object under test
@ -213,7 +212,7 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
}
} else {
if (randomBoolean()) {
types = new String[]{Metadata.ALL};
types = new String[] { Metadata.ALL };
} else {
types = new String[0];
}
@ -256,8 +255,11 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
/**
* Helper for testing validation.
*/
protected String validate(Collection<AggregationBuilder> siblingAggregations,
Collection<PipelineAggregationBuilder> siblingPipelineAggregations, AF builder) {
protected String validate(
Collection<AggregationBuilder> siblingAggregations,
Collection<PipelineAggregationBuilder> siblingPipelineAggregations,
AF builder
) {
return validate(ValidationContext.forTreeRoot(siblingAggregations, siblingPipelineAggregations, null), builder);
}
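A hedged usage sketch of the validate(...) overload above; createTestAggregatorFactory() and the clean-validation expectation are assumptions about a concrete subclass, not part of this change:

    public void testValidationSketch() {
        AF builder = createTestAggregatorFactory();
        // validate(...) returns null when the builder passes tree-root validation,
        // otherwise the accumulated error message.
        String error = validate(Collections.emptyList(), Collections.emptyList(), builder);
        assertNull("expected the default builder to validate cleanly", error);
    }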


@ -54,8 +54,8 @@ import static java.util.Collections.singletonMap;
import static org.opensearch.common.xcontent.XContentHelper.toXContent;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent;
public abstract class InternalSingleBucketAggregationTestCase<T extends InternalSingleBucketAggregation>
extends InternalAggregationTestCase<T> {
public abstract class InternalSingleBucketAggregationTestCase<T extends InternalSingleBucketAggregation> extends
InternalAggregationTestCase<T> {
private boolean hasInternalMax;
private boolean hasInternalMin;
@ -80,6 +80,7 @@ public abstract class InternalSingleBucketAggregationTestCase<T extends Internal
}
protected abstract T createTestInstance(String name, long docCount, InternalAggregations aggregations, Map<String, Object> metadata);
protected abstract void extraAssertReduced(T reduced, List<T> inputs);
@Override
@ -96,27 +97,27 @@ public abstract class InternalSingleBucketAggregationTestCase<T extends Internal
InternalAggregations aggregations = instance.getAggregations();
Map<String, Object> metadata = instance.getMetadata();
switch (between(0, 3)) {
case 0:
name += randomAlphaOfLength(5);
break;
case 1:
docCount += between(1, 2000);
break;
case 2:
List<InternalAggregation> aggs = new ArrayList<>();
aggs.add(new InternalMax("new_max", randomDouble(), randomNumericDocValueFormat(), emptyMap()));
aggs.add(new InternalMin("new_min", randomDouble(), randomNumericDocValueFormat(), emptyMap()));
aggregations = InternalAggregations.from(aggs);
break;
case 3:
default:
if (metadata == null) {
metadata = new HashMap<>(1);
} else {
metadata = new HashMap<>(instance.getMetadata());
}
metadata.put(randomAlphaOfLength(15), randomInt());
break;
}
return createTestInstance(name, docCount, aggregations, metadata);
}
@ -126,17 +127,17 @@ public abstract class InternalSingleBucketAggregationTestCase<T extends Internal
assertEquals(inputs.stream().mapToLong(InternalSingleBucketAggregation::getDocCount).sum(), reduced.getDocCount());
if (hasInternalMax) {
double expected = inputs.stream().mapToDouble(i -> {
InternalMax max = i.getAggregations().get("max");
return max.getValue();
}).max().getAsDouble();
InternalMax reducedMax = reduced.getAggregations().get("max");
assertEquals(expected, reducedMax.getValue(), 0);
}
if (hasInternalMin) {
double expected = inputs.stream().mapToDouble(i -> {
InternalMin min = i.getAggregations().get("min");
return min.getValue();
}).min().getAsDouble();
InternalMin reducedMin = reduced.getAggregations().get("min");
assertEquals(expected, reducedMin.getValue(), 0);
}


@ -58,12 +58,13 @@ public abstract class AbstractTermsTestCase extends OpenSearchIntegTestCase {
public void testOtherDocCount(String... fieldNames) {
for (String fieldName : fieldNames) {
SearchResponse allTerms = client().prepareSearch("idx")
.addAggregation(terms("terms")
.executionHint(randomExecutionHint())
.field(fieldName)
.size(10000)
.collectMode(randomFrom(SubAggCollectionMode.values())))
.get();
.addAggregation(
terms("terms").executionHint(randomExecutionHint())
.field(fieldName)
.size(10000)
.collectMode(randomFrom(SubAggCollectionMode.values()))
)
.get();
assertSearchResponse(allTerms);
Terms terms = allTerms.getAggregations().get("terms");
@ -74,13 +75,14 @@ public abstract class AbstractTermsTestCase extends OpenSearchIntegTestCase {
for (int size = 1; size < totalNumTerms + 2; size += randomIntBetween(1, 5)) {
for (int shardSize = size; shardSize <= totalNumTerms + 2; shardSize += randomIntBetween(1, 5)) {
SearchResponse resp = client().prepareSearch("idx")
.addAggregation(terms("terms")
.executionHint(randomExecutionHint())
.field(fieldName)
.size(size)
.shardSize(shardSize)
.collectMode(randomFrom(SubAggCollectionMode.values())))
.get();
.addAggregation(
terms("terms").executionHint(randomExecutionHint())
.field(fieldName)
.size(size)
.shardSize(shardSize)
.collectMode(randomFrom(SubAggCollectionMode.values()))
)
.get();
assertSearchResponse(resp);
terms = resp.getAggregations().get("terms");
assertEquals(Math.min(size, totalNumTerms), terms.getBuckets().size());
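The assertion encodes the truncation rule the nested loops walk through: with totalNumTerms = 7, for example, size = 3 yields 3 buckets while size = 10 yields all 7.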


@ -52,11 +52,18 @@ public abstract class AbstractNumericTestCase extends OpenSearchIntegTestCase {
final int numDocs = 10;
for (int i = 0; i < numDocs; i++) { // TODO randomize the size and the params in here?
builders.add(client().prepareIndex("idx", "type", String.valueOf(i)).setSource(jsonBuilder()
.startObject()
.field("value", i+1)
.startArray("values").value(i+2).value(i+3).endArray()
.endObject()));
builders.add(
client().prepareIndex("idx", "type", String.valueOf(i))
.setSource(
jsonBuilder().startObject()
.field("value", i + 1)
.startArray("values")
.value(i + 2)
.value(i + 3)
.endArray()
.endObject()
)
);
}
minValue = 1;
minValues = 2;
@ -71,10 +78,10 @@ public abstract class AbstractNumericTestCase extends OpenSearchIntegTestCase {
prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
builders = new ArrayList<>();
for (int i = 0; i < 2; i++) {
builders.add(client().prepareIndex("empty_bucket_idx", "type", String.valueOf(i)).setSource(jsonBuilder()
.startObject()
.field("value", i*2)
.endObject()));
builders.add(
client().prepareIndex("empty_bucket_idx", "type", String.valueOf(i))
.setSource(jsonBuilder().startObject().field("value", i * 2).endObject())
);
}
indexRandom(true, builders);
ensureSearchable();
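For reference, with numDocs = 10 the single-valued field covers 1..10 and the multi-valued field covers 2..12, which is what the minValue = 1 and minValues = 2 fixtures above encode; the two empty_bucket_idx documents carry the values 0 and 2.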


@ -109,11 +109,14 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
// Large snapshot pool settings to set up nodes for tests involving multiple repositories that need to have enough
// threads so that blocking some threads on one repository doesn't block other repositories from doing work
protected static final Settings LARGE_SNAPSHOT_POOL_SETTINGS = Settings.builder()
.put("thread_pool.snapshot.core", 5).put("thread_pool.snapshot.max", 5).build();
.put("thread_pool.snapshot.core", 5)
.put("thread_pool.snapshot.max", 5)
.build();
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
// Rebalancing is causing some checks after restore to randomly fail
// due to https://github.com/elastic/elasticsearch/issues/9421
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
@ -172,8 +175,7 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
public static long getFailureCount(String repository) {
long failureCount = 0;
for (RepositoriesService repositoriesService :
internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) {
for (RepositoriesService repositoriesService : internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) {
MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
failureCount += mockRepository.getFailureCount();
}
@ -230,8 +232,9 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
// Make sure that snapshot clean up operations are finished
ClusterStateResponse stateResponse = clusterAdmin().prepareState().get();
boolean found = false;
for (SnapshotsInProgress.Entry entry :
stateResponse.getState().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries()) {
for (SnapshotsInProgress.Entry entry : stateResponse.getState()
.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY)
.entries()) {
final Snapshot curr = entry.snapshot();
if (curr.getRepository().equals(repository) && curr.getSnapshotId().getName().equals(snapshotName)) {
found = true;
@ -250,35 +253,36 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, masterName)
.repository(repositoryName)).setBlockAndFailOnWriteIndexFile();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName))
.setBlockAndFailOnWriteIndexFile();
return masterName;
}
public static String blockMasterOnWriteIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository)internalCluster().getMasterNodeInstance(RepositoriesService.class)
.repository(repositoryName)).setBlockOnWriteIndexFile();
((MockRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repositoryName))
.setBlockOnWriteIndexFile();
return masterName;
}
public static void blockMasterFromDeletingIndexNFile(String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, masterName)
.repository(repositoryName)).setBlockOnDeleteIndexFile();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName))
.setBlockOnDeleteIndexFile();
}
public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, masterName)
.repository(repositoryName)).setBlockAndFailOnWriteSnapFiles(true);
((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterName).repository(repositoryName))
.setBlockAndFailOnWriteSnapFiles(true);
return masterName;
}
public static String blockNodeWithIndex(final String repositoryName, final String indexName) {
for(String node : internalCluster().nodesInclude(indexName)) {
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository(repositoryName))
.blockOnDataFiles(true);
for (String node : internalCluster().nodesInclude(indexName)) {
((MockRepository) internalCluster().getInstance(RepositoriesService.class, node).repository(repositoryName)).blockOnDataFiles(
true
);
return node;
}
fail("No nodes for the index " + indexName + " found");
@ -286,24 +290,24 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
}
public static void blockNodeOnAnyFiles(String repository, String nodeName) {
((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName)
.repository(repository)).setBlockOnAnyFiles(true);
((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).setBlockOnAnyFiles(
true
);
}
public static void blockDataNode(String repository, String nodeName) {
((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName)
.repository(repository)).blockOnDataFiles(true);
((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnDataFiles(true);
}
public static void blockAllDataNodes(String repository) {
for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
((MockRepository)repositoriesService.repository(repository)).blockOnDataFiles(true);
for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
((MockRepository) repositoriesService.repository(repository)).blockOnDataFiles(true);
}
}
public static void unblockAllDataNodes(String repository) {
for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
((MockRepository)repositoriesService.repository(repository)).unblock();
for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
((MockRepository) repositoriesService.repository(repository)).unblock();
}
}
@ -330,13 +334,12 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
public void unblockNode(final String repository, final String node) {
logger.info("--> unblocking [{}] on node [{}]", repository, node);
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository(repository)).unblock();
((MockRepository) internalCluster().getInstance(RepositoriesService.class, node).repository(repository)).unblock();
}
protected void createRepository(String repoName, String type, Settings.Builder settings) {
logger.info("--> creating repository [{}] [{}]", repoName, type);
assertAcked(clusterAdmin().preparePutRepository(repoName)
.setType(type).setSettings(settings));
assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings));
}
protected void createRepository(String repoName, String type, Path location) {
@ -357,8 +360,7 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
}
protected static Settings.Builder indexSettingsNoReplicas(int shards) {
return Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0);
return Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0);
}
/**
@ -377,24 +379,30 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
protected String initWithSnapshotVersion(String repoName, Path repoPath, Version version) throws IOException {
assertThat("This hack only works on an empty repository", getRepositoryData(repoName).getSnapshotIds(), empty());
final String oldVersionSnapshot = OLD_VERSION_SNAPSHOT_PREFIX + version.id;
final CreateSnapshotResponse createSnapshotResponse = clusterAdmin()
.prepareCreateSnapshot(repoName, oldVersionSnapshot).setIndices("does-not-exist-for-sure-*")
.setWaitForCompletion(true).get();
final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, oldVersionSnapshot)
.setIndices("does-not-exist-for-sure-*")
.setWaitForCompletion(true)
.get();
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), is(0));
logger.info("--> writing downgraded RepositoryData for repository metadata version [{}]", version);
final RepositoryData repositoryData = getRepositoryData(repoName);
final XContentBuilder jsonBuilder = JsonXContent.contentBuilder();
repositoryData.snapshotsToXContent(jsonBuilder, version);
final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent(JsonXContent.jsonXContent.createParser(
final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent(
JsonXContent.jsonXContent.createParser(
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString())),
repositoryData.getGenId(), randomBoolean());
Files.write(repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()),
BytesReference.toBytes(BytesReference.bytes(
downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), version))),
StandardOpenOption.TRUNCATE_EXISTING);
Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString())
),
repositoryData.getGenId(),
randomBoolean()
);
Files.write(
repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()),
BytesReference.toBytes(BytesReference.bytes(downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), version))),
StandardOpenOption.TRUNCATE_EXISTING
);
return oldVersionSnapshot;
}
@ -413,11 +421,11 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, List<String> indices) {
logger.info("--> creating snapshot [{}] of {} in [{}]", snapshot, indices, repositoryName);
final CreateSnapshotResponse response = client().admin()
.cluster()
.prepareCreateSnapshot(repositoryName, snapshot)
.setIndices(indices.toArray(Strings.EMPTY_ARRAY))
.setWaitForCompletion(true)
.get();
.cluster()
.prepareCreateSnapshot(repositoryName, snapshot)
.setIndices(indices.toArray(Strings.EMPTY_ARRAY))
.setWaitForCompletion(true)
.get();
final SnapshotInfo snapshotInfo = response.getSnapshotInfo();
assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
@ -444,8 +452,9 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
}
protected long getCountForIndex(String indexName) {
return client().search(new SearchRequest(new SearchRequest(indexName).source(
new SearchSourceBuilder().size(0).trackTotalHits(true)))).actionGet().getHits().getTotalHits().value;
return client().search(
new SearchRequest(new SearchRequest(indexName).source(new SearchSourceBuilder().size(0).trackTotalHits(true)))
).actionGet().getHits().getTotalHits().value;
}
protected void assertDocCount(String index, long count) {
@ -465,19 +474,40 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
assertNotNull(repositoriesMetadata);
final RepositoryMetadata initialRepoMetadata = repositoriesMetadata.repository(repoName);
assertNotNull(initialRepoMetadata);
assertThat("We can only manually insert a snapshot into a repository that does not have a generation tracked in the CS",
initialRepoMetadata.generation(), is(RepositoryData.UNKNOWN_REPO_GEN));
assertThat(
"We can only manually insert a snapshot into a repository that does not have a generation tracked in the CS",
initialRepoMetadata.generation(),
is(RepositoryData.UNKNOWN_REPO_GEN)
);
final Repository repo = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID(random()));
logger.info("--> adding old version FAILED snapshot [{}] to repository [{}]", snapshotId, repoName);
final SnapshotInfo snapshotInfo = new SnapshotInfo(snapshotId,
Collections.emptyList(), Collections.emptyList(),
SnapshotState.FAILED, "failed on purpose",
SnapshotsService.OLD_SNAPSHOT_FORMAT, 0L,0L, 0, 0, Collections.emptyList(),
randomBoolean(), metadata);
PlainActionFuture.<RepositoryData, Exception>get(f -> repo.finalizeSnapshot(
ShardGenerations.EMPTY, getRepositoryData(repoName).getGenId(), state.metadata(), snapshotInfo,
SnapshotsService.OLD_SNAPSHOT_FORMAT, Function.identity(), f));
final SnapshotInfo snapshotInfo = new SnapshotInfo(
snapshotId,
Collections.emptyList(),
Collections.emptyList(),
SnapshotState.FAILED,
"failed on purpose",
SnapshotsService.OLD_SNAPSHOT_FORMAT,
0L,
0L,
0,
0,
Collections.emptyList(),
randomBoolean(),
metadata
);
PlainActionFuture.<RepositoryData, Exception>get(
f -> repo.finalizeSnapshot(
ShardGenerations.EMPTY,
getRepositoryData(repoName).getGenId(),
state.metadata(),
snapshotInfo,
SnapshotsService.OLD_SNAPSHOT_FORMAT,
Function.identity(),
f
)
);
}
protected void awaitNoMoreRunningOperations() throws Exception {
@ -486,8 +516,11 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
protected void awaitNoMoreRunningOperations(String viaNode) throws Exception {
logger.info("--> verify no more operations in the cluster state");
awaitClusterState(viaNode, state -> state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty() &&
state.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY).hasDeletionsInProgress() == false);
awaitClusterState(
viaNode,
state -> state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty()
&& state.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY).hasDeletionsInProgress() == false
);
}
protected void awaitClusterState(Predicate<ClusterState> statePredicate) throws Exception {
@ -520,8 +553,8 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
}
}
protected ActionFuture<CreateSnapshotResponse> startFullSnapshotBlockedOnDataNode(String snapshotName, String repoName,
String dataNode) throws InterruptedException {
protected ActionFuture<CreateSnapshotResponse> startFullSnapshotBlockedOnDataNode(String snapshotName, String repoName, String dataNode)
throws InterruptedException {
blockDataNode(repoName, dataNode);
final ActionFuture<CreateSnapshotResponse> fut = startFullSnapshot(repoName, snapshotName);
waitForBlock(dataNode, repoName, TimeValue.timeValueSeconds(30L));
@ -534,14 +567,12 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
protected ActionFuture<CreateSnapshotResponse> startFullSnapshot(String repoName, String snapshotName, boolean partial) {
logger.info("--> creating full snapshot [{}] to repo [{}]", snapshotName, repoName);
return clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true)
.setPartial(partial).execute();
return clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setPartial(partial).execute();
}
protected void awaitNumberOfSnapshotsInProgress(int count) throws Exception {
logger.info("--> wait for [{}] snapshots to show up in the cluster state", count);
awaitClusterState(state ->
state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().size() == count);
awaitClusterState(state -> state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().size() == count);
}
protected static SnapshotInfo assertSuccessful(ActionFuture<CreateSnapshotResponse> future) throws Exception {
@ -591,8 +622,7 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestC
}
protected SnapshotInfo getSnapshot(String repository, String snapshot) {
final List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots(repository).setSnapshots(snapshot)
.get().getSnapshots();
final List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
assertThat(snapshotInfos, hasSize(1));
return snapshotInfos.get(0);
}
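Taken together, these helpers compose into the usual snapshot-test flow. A minimal sketch, assuming an already-populated index named "test-idx" (the names are illustrative, and randomRepoPath() is assumed to be the repository-path helper inherited from the integration test base class):

    // Uses the Path overload of createRepository(...) above; createSnapshot(...)
    // already asserts that the snapshot reaches SnapshotState.SUCCESS.
    createRepository("test-repo", "fs", randomRepoPath());
    SnapshotInfo snapshotInfo = createSnapshot("test-repo", "snap-1", Collections.singletonList("test-idx"));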


@ -81,15 +81,23 @@ public class MockRepository extends FsRepository {
public static class Plugin extends org.opensearch.plugins.Plugin implements RepositoryPlugin {
public static final Setting<String> USERNAME_SETTING = Setting.simpleString("secret.mock.username", Property.NodeScope);
public static final Setting<String> PASSWORD_SETTING =
Setting.simpleString("secret.mock.password", Property.NodeScope, Property.Filtered);
public static final Setting<String> PASSWORD_SETTING = Setting.simpleString(
"secret.mock.password",
Property.NodeScope,
Property.Filtered
);
@Override
public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService, RecoverySettings recoverySettings) {
return Collections.singletonMap("mock", (metadata) ->
new MockRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings));
public Map<String, Repository.Factory> getRepositories(
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
RecoverySettings recoverySettings
) {
return Collections.singletonMap(
"mock",
(metadata) -> new MockRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings)
);
}
@Override
@ -156,9 +164,13 @@ public class MockRepository extends FsRepository {
private volatile boolean blocked = false;
private volatile boolean setThrowExceptionWhileDelete;
public MockRepository(RepositoryMetadata metadata, Environment environment,
NamedXContentRegistry namedXContentRegistry, ClusterService clusterService,
RecoverySettings recoverySettings) {
public MockRepository(
RepositoryMetadata metadata,
Environment environment,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
RecoverySettings recoverySettings
) {
super(overrideSettings(metadata, environment), environment, namedXContentRegistry, clusterService, recoverySettings);
randomControlIOExceptionRate = metadata.settings().getAsDouble("random_control_io_exception_rate", 0.0);
randomDataFileIOExceptionRate = metadata.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
@ -184,8 +196,11 @@ public class MockRepository extends FsRepository {
if (metadata.settings().getAsBoolean("localize_location", false)) {
Path location = PathUtils.get(metadata.settings().get("location"));
location = location.resolve(Integer.toString(environment.hashCode()));
return new RepositoryMetadata(metadata.name(), metadata.type(),
Settings.builder().put(metadata.settings()).put("location", location.toAbsolutePath()).build());
return new RepositoryMetadata(
metadata.name(),
metadata.type(),
Settings.builder().put(metadata.settings()).put("location", location.toAbsolutePath()).build()
);
} else {
return metadata;
}
@ -274,8 +289,14 @@ public class MockRepository extends FsRepository {
logger.debug("[{}] Blocking execution", metadata.name());
boolean wasBlocked = false;
try {
while (blockOnDataFiles || blockOnAnyFiles || blockAndFailOnWriteIndexFile || blockOnWriteIndexFile ||
blockAndFailOnWriteSnapFile || blockOnDeleteIndexN || blockOnWriteShardLevelMeta || blockOnReadIndexMeta) {
while (blockOnDataFiles
|| blockOnAnyFiles
|| blockAndFailOnWriteIndexFile
|| blockOnWriteIndexFile
|| blockAndFailOnWriteSnapFile
|| blockOnDeleteIndexN
|| blockOnWriteShardLevelMeta
|| blockOnReadIndexMeta) {
blocked = true;
this.wait();
wasBlocked = true;
@ -332,8 +353,7 @@ public class MockRepository extends FsRepository {
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] bytes = digest.digest(path.getBytes("UTF-8"));
int i = 0;
return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
| ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16) | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
} catch (NoSuchAlgorithmException | UnsupportedEncodingException ex) {
throw new OpenSearchException("cannot calculate hashcode", ex);
}
@ -411,7 +431,7 @@ public class MockRepository extends FsRepository {
@Override
public InputStream readBlob(String name) throws IOException {
if (blockOnReadIndexMeta && name.startsWith(BlobStoreRepository.METADATA_PREFIX) && path().equals(basePath()) == false) {
blockExecutionAndMaybeWait(name);
} else {
maybeReadErrorAfterBlock(name);
@ -444,15 +464,14 @@ public class MockRepository extends FsRepository {
deleteBlobsIgnoringIfNotExists(Collections.singletonList(blob));
deleteByteCount += blobs.get(blob).length();
}
blobStore().blobContainer(path().parent()).deleteBlobsIgnoringIfNotExists(
Collections.singletonList(path().toArray()[path().toArray().length - 1]));
blobStore().blobContainer(path().parent())
.deleteBlobsIgnoringIfNotExists(Collections.singletonList(path().toArray()[path().toArray().length - 1]));
return deleteResult.add(deleteBlobCount, deleteByteCount);
}
@Override
public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
if (blockOnDeleteIndexN && blobNames.stream().anyMatch(
name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))) {
if (blockOnDeleteIndexN && blobNames.stream().anyMatch(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))) {
blockExecutionAndMaybeWait("index-{N}");
}
super.deleteBlobsIgnoringIfNotExists(blobNames);
@ -480,11 +499,11 @@ public class MockRepository extends FsRepository {
}
@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
throws IOException {
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
maybeIOExceptionOrBlock(blobName);
if (blockOnWriteShardLevelMeta && blobName.startsWith(BlobStoreRepository.SNAPSHOT_PREFIX)
&& path().equals(basePath()) == false) {
if (blockOnWriteShardLevelMeta
&& blobName.startsWith(BlobStoreRepository.SNAPSHOT_PREFIX)
&& path().equals(basePath()) == false) {
blockExecutionAndMaybeWait(blobName);
}
super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
@ -496,8 +515,12 @@ public class MockRepository extends FsRepository {
}
@Override
public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize,
final boolean failIfAlreadyExists) throws IOException {
public void writeBlobAtomic(
final String blobName,
final InputStream inputStream,
final long blobSize,
final boolean failIfAlreadyExists
) throws IOException {
final Random random = RandomizedContext.current().getRandom();
if (failOnIndexLatest && BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobName)) {
throw new IOException("Random IOException");
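A sketch of wiring MockRepository into a test; the "mock" type and the two exception-rate settings are the ones read by the plugin and constructor above, while the repository name and rates are arbitrary:

    // Registers a mock repository that randomly injects I/O exceptions on control
    // and data files at the configured rates.
    client().admin().cluster().preparePutRepository("test-repo")
        .setType("mock")
        .setSettings(Settings.builder()
            .put("location", randomRepoPath())
            .put("random_control_io_exception_rate", 0.1)
            .put("random_data_file_io_exception_rate", 0.1))
        .get();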


@ -49,9 +49,10 @@ public abstract class AbstractBootstrapCheckTestCase extends OpenSearchTestCase
protected BootstrapContext createTestContext(Settings settings, Metadata metadata) {
Path homePath = createTempDir();
Environment environment = new Environment(settings(Version.CURRENT)
.put(settings)
.put(Environment.PATH_HOME_SETTING.getKey(), homePath.toString()).build(), null);
Environment environment = new Environment(
settings(Version.CURRENT).put(settings).put(Environment.PATH_HOME_SETTING.getKey(), homePath.toString()).build(),
null
);
return new BootstrapContext(environment, metadata);
}
}


@ -77,8 +77,12 @@ public abstract class AbstractBroadcastResponseTestCase<T extends BroadcastRespo
return createTestInstance(totalShards, successfulShards, failedShards, failures);
}
protected abstract T createTestInstance(int totalShards, int successfulShards, int failedShards,
List<DefaultShardOperationFailedException> failures);
protected abstract T createTestInstance(
int totalShards,
int successfulShards,
int failedShards,
List<DefaultShardOperationFailedException> failures
);
@Override
protected void assertEqualInstances(T response, T parsedResponse) {
@ -127,7 +131,7 @@ public abstract class AbstractBroadcastResponseTestCase<T extends BroadcastRespo
XContentType xContentType = randomFrom(XContentType.values());
BytesReference bytesReference = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
T parsedResponse;
try(XContentParser parser = createParser(xContentType.xContent(), bytesReference)) {
try (XContentParser parser = createParser(xContentType.xContent(), bytesReference)) {
parsedResponse = doParseInstance(parser);
assertNull(parser.nextToken());
}


@ -135,13 +135,32 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point";
protected static final String GEO_POINT_ALIAS_FIELD_NAME = "mapped_geo_point_alias";
protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape";
protected static final String[] MAPPED_FIELD_NAMES = new String[]{TEXT_FIELD_NAME, TEXT_ALIAS_FIELD_NAME,
INT_FIELD_NAME, INT_RANGE_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_NANOS_FIELD_NAME, DATE_FIELD_NAME,
DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_POINT_ALIAS_FIELD_NAME,
GEO_SHAPE_FIELD_NAME};
protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{TEXT_FIELD_NAME, TEXT_ALIAS_FIELD_NAME,
INT_FIELD_NAME, INT_RANGE_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_NANOS_FIELD_NAME,
DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_POINT_ALIAS_FIELD_NAME};
protected static final String[] MAPPED_FIELD_NAMES = new String[] {
TEXT_FIELD_NAME,
TEXT_ALIAS_FIELD_NAME,
INT_FIELD_NAME,
INT_RANGE_FIELD_NAME,
DOUBLE_FIELD_NAME,
BOOLEAN_FIELD_NAME,
DATE_NANOS_FIELD_NAME,
DATE_FIELD_NAME,
DATE_RANGE_FIELD_NAME,
OBJECT_FIELD_NAME,
GEO_POINT_FIELD_NAME,
GEO_POINT_ALIAS_FIELD_NAME,
GEO_SHAPE_FIELD_NAME };
protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[] {
TEXT_FIELD_NAME,
TEXT_ALIAS_FIELD_NAME,
INT_FIELD_NAME,
INT_RANGE_FIELD_NAME,
DOUBLE_FIELD_NAME,
BOOLEAN_FIELD_NAME,
DATE_NANOS_FIELD_NAME,
DATE_FIELD_NAME,
DATE_RANGE_FIELD_NAME,
GEO_POINT_FIELD_NAME,
GEO_POINT_ALIAS_FIELD_NAME };
private static final Map<String, String> ALIAS_TO_CONCRETE_FIELD_NAME = new HashMap<>();
static {
@ -167,8 +186,7 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
return Collections.singletonList(TestGeoShapeFieldMapperPlugin.class);
}
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
}
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {}
@BeforeClass
public static void beforeClass() {
@ -201,11 +219,10 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
protected Settings createTestIndexSettings() {
// we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually.
Version indexVersionCreated = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_6_0_0, Version.CURRENT);
return Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, indexVersionCreated)
.build();
Version indexVersionCreated = randomBoolean()
? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_6_0_0, Version.CURRENT);
return Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexVersionCreated).build();
}
protected static IndexSettings indexSettings() {
@ -238,10 +255,22 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
// this setup
long masterSeed = SeedUtils.parseSeed(RandomizedTest.getContext().getRunnerSeedAsString());
RandomizedTest.getContext().runWithPrivateRandomness(masterSeed, (Callable<Void>) () -> {
serviceHolder = new ServiceHolder(nodeSettings, createTestIndexSettings(), getPlugins(), nowInMillis,
AbstractBuilderTestCase.this, true);
serviceHolderWithNoType = new ServiceHolder(nodeSettings, createTestIndexSettings(), getPlugins(), nowInMillis,
AbstractBuilderTestCase.this, false);
serviceHolder = new ServiceHolder(
nodeSettings,
createTestIndexSettings(),
getPlugins(),
nowInMillis,
AbstractBuilderTestCase.this,
true
);
serviceHolderWithNoType = new ServiceHolder(
nodeSettings,
createTestIndexSettings(),
getPlugins(),
nowInMillis,
AbstractBuilderTestCase.this,
false
);
return null;
});
}
@ -296,7 +325,7 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (method.equals(Client.class.getMethod("get", GetRequest.class, ActionListener.class))){
if (method.equals(Client.class.getMethod("get", GetRequest.class, ActionListener.class))) {
GetResponse getResponse = delegate.executeGet((GetRequest) args[0]);
ActionListener<GetResponse> listener = (ActionListener<GetResponse>) args[1];
if (randomBoolean()) {
@ -305,8 +334,7 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
new Thread(() -> listener.onResponse(getResponse)).start();
}
return null;
} else if (method.equals(Client.class.getMethod
("multiTermVectors", MultiTermVectorsRequest.class))) {
} else if (method.equals(Client.class.getMethod("multiTermVectors", MultiTermVectorsRequest.class))) {
return new PlainActionFuture<MultiTermVectorsResponse>() {
@Override
public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException {
@ -335,37 +363,42 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
private final Client client;
private final long nowInMillis;
ServiceHolder(Settings nodeSettings,
Settings indexSettings,
Collection<Class<? extends Plugin>> plugins,
long nowInMillis,
AbstractBuilderTestCase testCase,
boolean registerType) throws IOException {
ServiceHolder(
Settings nodeSettings,
Settings indexSettings,
Collection<Class<? extends Plugin>> plugins,
long nowInMillis,
AbstractBuilderTestCase testCase,
boolean registerType
) throws IOException {
this.nowInMillis = nowInMillis;
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, emptyMap(),
null, () -> {
throw new AssertionError("node.name must be set");
});
Environment env = InternalSettingsPreparer.prepareEnvironment(
nodeSettings,
emptyMap(),
null,
() -> { throw new AssertionError("node.name must be set"); }
);
PluginsService pluginsService;
pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins);
client = (Client) Proxy.newProxyInstance(
Client.class.getClassLoader(),
new Class[]{Client.class},
clientInvocationHandler);
client = (Client) Proxy.newProxyInstance(Client.class.getClassLoader(), new Class[] { Client.class }, clientInvocationHandler);
ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class));
List<Setting<?>> additionalSettings = pluginsService.getPluginSettings();
SettingsModule settingsModule =
new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter(), Collections.emptySet());
SettingsModule settingsModule = new SettingsModule(
nodeSettings,
additionalSettings,
pluginsService.getPluginSettingsFilter(),
Collections.emptySet()
);
searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
xContentRegistry = new NamedXContentRegistry(Stream.of(
searchModule.getNamedXContents().stream()
).flatMap(Function.identity()).collect(toList()));
xContentRegistry = new NamedXContentRegistry(
Stream.of(searchModule.getNamedXContents().stream()).flatMap(Function.identity()).collect(toList())
);
IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings();
idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings);
AnalysisModule analysisModule = new AnalysisModule(TestEnvironment.newEnvironment(nodeSettings), emptyList());
@ -373,12 +406,24 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
scriptService = new MockScriptService(Settings.EMPTY, scriptModule.engines, scriptModule.contexts);
similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap());
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
mapperService = new MapperService(idxSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry,
() -> createShardContext(null), () -> false, null);
mapperService = new MapperService(
idxSettings,
indexAnalyzers,
xContentRegistry,
similarityService,
mapperRegistry,
() -> createShardContext(null),
() -> false,
null
);
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
});
indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache,
new NoneCircuitBreakerService(), mapperService);
indexFieldDataService = new IndexFieldDataService(
idxSettings,
indicesFieldDataCache,
new NoneCircuitBreakerService(),
mapperService
);
bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
@Override
public void onCache(ShardId shardId, Accountable accountable) {
@ -392,29 +437,64 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
});
if (registerType) {
mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef("_doc",
TEXT_FIELD_NAME, "type=text",
KEYWORD_FIELD_NAME, "type=keyword",
TEXT_ALIAS_FIELD_NAME, "type=alias,path=" + TEXT_FIELD_NAME,
INT_FIELD_NAME, "type=integer",
INT_ALIAS_FIELD_NAME, "type=alias,path=" + INT_FIELD_NAME,
INT_RANGE_FIELD_NAME, "type=integer_range",
DOUBLE_FIELD_NAME, "type=double",
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_NANOS_FIELD_NAME, "type=date_nanos",
DATE_FIELD_NAME, "type=date",
DATE_ALIAS_FIELD_NAME, "type=alias,path=" + DATE_FIELD_NAME,
DATE_RANGE_FIELD_NAME, "type=date_range",
OBJECT_FIELD_NAME, "type=object",
GEO_POINT_FIELD_NAME, "type=geo_point",
GEO_POINT_ALIAS_FIELD_NAME, "type=alias,path=" + GEO_POINT_FIELD_NAME,
GEO_SHAPE_FIELD_NAME, "type=geo_shape"
))), MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge(
"_doc",
new CompressedXContent(
Strings.toString(
PutMappingRequest.buildFromSimplifiedDef(
"_doc",
TEXT_FIELD_NAME,
"type=text",
KEYWORD_FIELD_NAME,
"type=keyword",
TEXT_ALIAS_FIELD_NAME,
"type=alias,path=" + TEXT_FIELD_NAME,
INT_FIELD_NAME,
"type=integer",
INT_ALIAS_FIELD_NAME,
"type=alias,path=" + INT_FIELD_NAME,
INT_RANGE_FIELD_NAME,
"type=integer_range",
DOUBLE_FIELD_NAME,
"type=double",
BOOLEAN_FIELD_NAME,
"type=boolean",
DATE_NANOS_FIELD_NAME,
"type=date_nanos",
DATE_FIELD_NAME,
"type=date",
DATE_ALIAS_FIELD_NAME,
"type=alias,path=" + DATE_FIELD_NAME,
DATE_RANGE_FIELD_NAME,
"type=date_range",
OBJECT_FIELD_NAME,
"type=object",
GEO_POINT_FIELD_NAME,
"type=geo_point",
GEO_POINT_ALIAS_FIELD_NAME,
"type=alias,path=" + GEO_POINT_FIELD_NAME,
GEO_SHAPE_FIELD_NAME,
"type=geo_shape"
)
)
),
MapperService.MergeReason.MAPPING_UPDATE
);
// also add mappings for two inner field in the object field
mapperService.merge("_doc", new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\","
+ "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" +
INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"),
MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge(
"_doc",
new CompressedXContent(
"{\"properties\":{\""
+ OBJECT_FIELD_NAME
+ "\":{\"type\":\"object\","
+ "\"properties\":{\""
+ DATE_FIELD_NAME
+ "\":{\"type\":\"date\"},\""
+ INT_FIELD_NAME
+ "\":{\"type\":\"integer\"}}}}}"
),
MapperService.MergeReason.MAPPING_UPDATE
);
testCase.initializeAdditionalMappings(mapperService);
}
}
@ -425,13 +505,28 @@ public abstract class AbstractBuilderTestCase extends OpenSearchTestCase {
}
@Override
public void close() throws IOException {
}
public void close() throws IOException {}
QueryShardContext createShardContext(IndexSearcher searcher) {
return new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, bitsetFilterCache,
indexFieldDataService::getForField, mapperService, similarityService, scriptService, xContentRegistry,
namedWriteableRegistry, this.client, searcher, () -> nowInMillis, null, indexNameMatcher(), () -> true, null);
return new QueryShardContext(
0,
idxSettings,
BigArrays.NON_RECYCLING_INSTANCE,
bitsetFilterCache,
indexFieldDataService::getForField,
mapperService,
similarityService,
scriptService,
xContentRegistry,
namedWriteableRegistry,
this.client,
searcher,
() -> nowInMillis,
null,
indexNameMatcher(),
() -> true,
null
);
}
ScriptModule createScriptModule(List<ScriptPlugin> scriptPlugins) {
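A sketch of the initializeAdditionalMappings hook above, following the same mapperService.merge(...) pattern used in ServiceHolder; the extra keyword field is hypothetical:

    @Override
    protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
        // Registers one extra test-only field on top of the default mappings.
        mapperService.merge(
            "_doc",
            new CompressedXContent("{\"properties\":{\"custom_field\":{\"type\":\"keyword\"}}}"),
            MapperService.MergeReason.MAPPING_UPDATE
        );
    }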


@ -55,7 +55,12 @@ public abstract class AbstractDiffableSerializationTestCase<T extends Diffable<T
protected abstract Reader<Diff<T>> diffReader();
public final void testDiffableSerialization() throws IOException {
DiffableTestUtils.testDiffableSerialization(this::createTestInstance, this::makeTestChanges, getNamedWriteableRegistry(),
instanceReader(), diffReader());
DiffableTestUtils.testDiffableSerialization(
this::createTestInstance,
this::makeTestChanges,
getNamedWriteableRegistry(),
instanceReader(),
diffReader()
);
}
}

View File

@ -53,7 +53,12 @@ public abstract class AbstractDiffableWireSerializationTestCase<T extends Diffab
protected abstract Reader<Diff<T>> diffReader();
public final void testDiffableSerialization() throws IOException {
DiffableTestUtils.testDiffableSerialization(this::createTestInstance, this::makeTestChanges, getNamedWriteableRegistry(),
instanceReader(), diffReader());
DiffableTestUtils.testDiffableSerialization(
this::createTestInstance,
this::makeTestChanges,
getNamedWriteableRegistry(),
instanceReader(),
diffReader()
);
}
}


@ -113,13 +113,28 @@ public abstract class AbstractMultiClustersTestCase extends OpenSearchTestCase {
for (String clusterAlias : clusterAliases) {
final String clusterName = clusterAlias.equals(LOCAL_CLUSTER) ? "main-cluster" : clusterAlias;
final int numberOfNodes = randomIntBetween(1, 3);
final List<Class<? extends Plugin>> mockPlugins =
Arrays.asList(MockHttpTransport.TestPlugin.class, MockTransportService.TestPlugin.class, MockNioTransportPlugin.class);
final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(
MockHttpTransport.TestPlugin.class,
MockTransportService.TestPlugin.class,
MockNioTransportPlugin.class
);
final Collection<Class<? extends Plugin>> nodePlugins = nodePlugins(clusterAlias);
final Settings nodeSettings = Settings.EMPTY;
final NodeConfigurationSource nodeConfigurationSource = nodeConfigurationSource(nodeSettings, nodePlugins);
final InternalTestCluster cluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodes,
numberOfNodes, clusterName, nodeConfigurationSource, 0, clusterName + "-", mockPlugins, Function.identity());
final InternalTestCluster cluster = new InternalTestCluster(
randomLong(),
createTempDir(),
true,
true,
numberOfNodes,
numberOfNodes,
clusterName,
nodeConfigurationSource,
0,
clusterName + "-",
mockPlugins,
Function.identity()
);
cluster.beforeTest(random(), 0);
clusters.put(clusterAlias, cluster);
}
@ -173,16 +188,19 @@ public abstract class AbstractMultiClustersTestCase extends OpenSearchTestCase {
Settings.Builder settings = Settings.builder();
for (Map.Entry<String, List<String>> entry : seedNodes.entrySet()) {
final String clusterAlias = entry.getKey();
final String seeds = entry.getValue().stream()
final String seeds = entry.getValue()
.stream()
.map(node -> cluster(clusterAlias).getInstance(TransportService.class, node).boundAddress().publishAddress().toString())
.collect(Collectors.joining(","));
settings.put("cluster.remote." + clusterAlias + ".seeds", seeds);
}
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get();
assertBusy(() -> {
List<RemoteConnectionInfo> remoteConnectionInfos = client()
.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).actionGet().getInfos()
.stream().filter(RemoteConnectionInfo::isConnected)
List<RemoteConnectionInfo> remoteConnectionInfos = client().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest())
.actionGet()
.getInfos()
.stream()
.filter(RemoteConnectionInfo::isConnected)
.collect(Collectors.toList());
final long totalConnections = seedNodes.values().stream().map(List::size).count();
assertThat(remoteConnectionInfos, hasSize(Math.toIntExact(totalConnections)));
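In short, the method publishes each remote node's transport publish address under the cluster.remote.<alias>.seeds setting, applies the result as persistent cluster settings, and waits until the remote-info API reports the expected number of connected remotes.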


@ -90,7 +90,6 @@ import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> extends AbstractBuilderTestCase {
private static final int NUMBER_OF_TESTQUERIES = 20;
@ -117,8 +116,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
public void testNegativeBoosts() {
QB testQuery = createTestQueryBuilder();
IllegalArgumentException exc =
expectThrows(IllegalArgumentException.class, () -> testQuery.boost(-0.5f));
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> testQuery.boost(-0.5f));
assertThat(exc.getMessage(), containsString("negative [boost]"));
}
@ -130,8 +128,13 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) {
QB testQuery = createTestQueryBuilder();
XContentType xContentType = randomFrom(XContentType.values());
BytesReference shuffledXContent = toShuffledXContent(testQuery, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean(),
shuffleProtectedFields());
BytesReference shuffledXContent = toShuffledXContent(
testQuery,
xContentType,
ToXContent.EMPTY_PARAMS,
randomBoolean(),
shuffleProtectedFields()
);
assertParsedQuery(createParser(xContentType.xContent(), shuffledXContent), testQuery);
for (Map.Entry<String, QB> alternateVersion : getAlternateVersions().entrySet()) {
String queryAsString = alternateVersion.getKey();
@ -261,8 +264,11 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
BytesStreamOutput out = new BytesStreamOutput();
try (
XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out);
XContentParser parser = JsonXContent.jsonXContent
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, query);
XContentParser parser = JsonXContent.jsonXContent.createParser(
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
query
);
) {
int objectIndex = -1;
Deque<String> levels = new LinkedList<>();
@ -364,9 +370,11 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
}
}
String testQuery = validQuery.substring(0, insertionPosition) + "[" +
validQuery.substring(insertionPosition, endArrayPosition) + "]" +
validQuery.substring(endArrayPosition, validQuery.length());
String testQuery = validQuery.substring(0, insertionPosition)
+ "["
+ validQuery.substring(insertionPosition, endArrayPosition)
+ "]"
+ validQuery.substring(endArrayPosition, validQuery.length());
ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(testQuery));
assertEquals("[" + queryName + "] query malformed, no start_object after query name", e.getMessage());
@ -441,20 +449,32 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
Query firstLuceneQuery = rewritten.toQuery(context);
assertNotNull("toQuery should not return null", firstLuceneQuery);
assertLuceneQuery(firstQuery, firstLuceneQuery, context);
//remove after assertLuceneQuery since the assertLuceneQuery impl might access the context as well
// remove after assertLuceneQuery since the assertLuceneQuery impl might access the context as well
assertTrue(
"query is not equal to its copy after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery,
firstQuery.equals(controlQuery));
assertTrue("equals is not symmetric after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery,
controlQuery.equals(firstQuery));
assertThat("query copy's hashcode is different from original hashcode after calling toQuery, firstQuery: " + firstQuery
+ ", secondQuery: " + controlQuery, controlQuery.hashCode(), equalTo(firstQuery.hashCode()));
"query is not equal to its copy after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery,
firstQuery.equals(controlQuery)
);
assertTrue(
"equals is not symmetric after calling toQuery, firstQuery: " + firstQuery + ", secondQuery: " + controlQuery,
controlQuery.equals(firstQuery)
);
assertThat(
"query copy's hashcode is different from original hashcode after calling toQuery, firstQuery: "
+ firstQuery
+ ", secondQuery: "
+ controlQuery,
controlQuery.hashCode(),
equalTo(firstQuery.hashCode())
);
QB secondQuery = copyQuery(firstQuery);
// query _name never should affect the result of toQuery, we randomly set it to make sure
if (randomBoolean()) {
secondQuery.queryName(secondQuery.queryName() == null ? randomAlphaOfLengthBetween(1, 30) : secondQuery.queryName()
+ randomAlphaOfLengthBetween(1, 10));
secondQuery.queryName(
secondQuery.queryName() == null
? randomAlphaOfLengthBetween(1, 30)
: secondQuery.queryName() + randomAlphaOfLengthBetween(1, 10)
);
}
context = new QueryShardContext(context);
Query secondLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context);
@ -462,17 +482,26 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
assertLuceneQuery(secondQuery, secondLuceneQuery, context);
if (builderGeneratesCacheableQueries()) {
assertEquals("two equivalent query builders lead to different lucene queries hashcode",
secondLuceneQuery.hashCode(), firstLuceneQuery.hashCode());
assertEquals("two equivalent query builders lead to different lucene queries",
rewrite(secondLuceneQuery), rewrite(firstLuceneQuery));
assertEquals(
"two equivalent query builders lead to different lucene queries hashcode",
secondLuceneQuery.hashCode(),
firstLuceneQuery.hashCode()
);
assertEquals(
"two equivalent query builders lead to different lucene queries",
rewrite(secondLuceneQuery),
rewrite(firstLuceneQuery)
);
}
if (supportsBoost() && firstLuceneQuery instanceof MatchNoDocsQuery == false) {
secondQuery.boost(firstQuery.boost() + 1f + randomFloat());
Query thirdLuceneQuery = rewriteQuery(secondQuery, context).toQuery(context);
assertNotEquals("modifying the boost doesn't affect the corresponding lucene query", rewrite(firstLuceneQuery),
rewrite(thirdLuceneQuery));
assertNotEquals(
"modifying the boost doesn't affect the corresponding lucene query",
rewrite(firstLuceneQuery),
rewrite(thirdLuceneQuery)
);
}
}
}
@ -516,8 +545,10 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
}
if (query != null) {
if (queryBuilder.boost() != AbstractQueryBuilder.DEFAULT_BOOST) {
assertThat(query, either(instanceOf(BoostQuery.class)).or(instanceOf(SpanBoostQuery.class))
.or(instanceOf(MatchNoDocsQuery.class)));
assertThat(
query,
either(instanceOf(BoostQuery.class)).or(instanceOf(SpanBoostQuery.class)).or(instanceOf(MatchNoDocsQuery.class))
);
if (query instanceof SpanBoostQuery) {
SpanBoostQuery spanBoostQuery = (SpanBoostQuery) query;
assertThat(spanBoostQuery.getBoost(), equalTo(queryBuilder.boost()));
@ -621,15 +652,18 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
protected QB changeNameOrBoost(QB original) throws IOException {
QB secondQuery = copyQuery(original);
if (randomBoolean()) {
secondQuery.queryName(secondQuery.queryName() == null ? randomAlphaOfLengthBetween(1, 30) : secondQuery.queryName()
+ randomAlphaOfLengthBetween(1, 10));
secondQuery.queryName(
secondQuery.queryName() == null
? randomAlphaOfLengthBetween(1, 30)
: secondQuery.queryName() + randomAlphaOfLengthBetween(1, 10)
);
} else {
secondQuery.boost(original.boost() + 1f + randomFloat());
}
return secondQuery;
}
//we use the streaming infra to create a copy of the query provided as argument
// we use the streaming infra to create a copy of the query provided as argument
@SuppressWarnings("unchecked")
private QB copyQuery(QB query) throws IOException {
Reader<QB> reader = (Reader<QB>) namedWriteableRegistry().getReader(QueryBuilder.class, query.getWriteableName());
@ -702,13 +736,11 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
protected static String getRandomRewriteMethod() {
String rewrite;
if (randomBoolean()) {
rewrite = randomFrom(QueryParsers.CONSTANT_SCORE,
QueryParsers.SCORING_BOOLEAN,
QueryParsers.CONSTANT_SCORE_BOOLEAN).getPreferredName();
rewrite = randomFrom(QueryParsers.CONSTANT_SCORE, QueryParsers.SCORING_BOOLEAN, QueryParsers.CONSTANT_SCORE_BOOLEAN)
.getPreferredName();
} else {
rewrite = randomFrom(QueryParsers.TOP_TERMS,
QueryParsers.TOP_TERMS_BOOST,
QueryParsers.TOP_TERMS_BLENDED_FREQS).getPreferredName() + "1";
rewrite = randomFrom(QueryParsers.TOP_TERMS, QueryParsers.TOP_TERMS_BOOST, QueryParsers.TOP_TERMS_BLENDED_FREQS)
.getPreferredName() + "1";
}
return rewrite;
}
@ -760,9 +792,10 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
source.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals(
msg(expected, Strings.toString(builder)),
expected.replaceAll("\\s+", ""),
Strings.toString(builder).replaceAll("\\s+", ""));
msg(expected, Strings.toString(builder)),
expected.replaceAll("\\s+", ""),
Strings.toString(builder).replaceAll("\\s+", "")
);
}
private static String msg(String left, String right) {
@ -773,18 +806,36 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
if (left.charAt(i) == right.charAt(i)) {
builder.append(left.charAt(i));
} else {
builder.append(">> ").append("until offset: ").append(i)
.append(" [").append(left.charAt(i)).append(" vs.").append(right.charAt(i))
.append("] [").append((int) left.charAt(i)).append(" vs.").append((int) right.charAt(i)).append(']');
builder.append(">> ")
.append("until offset: ")
.append(i)
.append(" [")
.append(left.charAt(i))
.append(" vs.")
.append(right.charAt(i))
.append("] [")
.append((int) left.charAt(i))
.append(" vs.")
.append((int) right.charAt(i))
.append(']');
return builder.toString();
}
}
if (left.length() != right.length()) {
int leftEnd = Math.max(size, left.length()) - 1;
int rightEnd = Math.max(size, right.length()) - 1;
builder.append(">> ").append("until offset: ").append(size)
.append(" [").append(left.charAt(leftEnd)).append(" vs.").append(right.charAt(rightEnd))
.append("] [").append((int) left.charAt(leftEnd)).append(" vs.").append((int) right.charAt(rightEnd)).append(']');
builder.append(">> ")
.append("until offset: ")
.append(size)
.append(" [")
.append(left.charAt(leftEnd))
.append(" vs.")
.append(right.charAt(rightEnd))
.append("] [")
.append((int) left.charAt(leftEnd))
.append(" vs.")
.append((int) right.charAt(rightEnd))
.append(']');
return builder.toString();
}
return "";

View File

@ -53,8 +53,9 @@ public abstract class AbstractSerializingTestCase<T extends ToXContent & Writeab
* both for equality and asserts equality on the two instances.
*/
public final void testFromXContent() throws IOException {
xContentTester(this::createParser, this::createXContextTestInstance, getToXContentParams(), this::doParseInstance)
.numberOfTestRuns(NUMBER_OF_TEST_RUNS)
xContentTester(this::createParser, this::createXContextTestInstance, getToXContentParams(), this::doParseInstance).numberOfTestRuns(
NUMBER_OF_TEST_RUNS
)
.supportsUnknownFields(supportsUnknownFields())
.shuffleFieldsExceptions(getShuffleFieldsExceptions())
.randomFieldsExcludeFilter(getRandomFieldsExcludeFilter())
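Concrete serializing tests plug into this same fluent tester through three hooks. A hedged sketch, with MyResponse standing in for any Writeable-plus-ToXContent type (hypothetical, not part of this change):

public class MyResponseSerializationTests extends AbstractSerializingTestCase<MyResponse> {
    @Override
    protected MyResponse createTestInstance() {
        return new MyResponse(randomNonNegativeLong());  // random round-trip input
    }

    @Override
    protected Reader<MyResponse> instanceReader() {
        return MyResponse::new;                          // StreamInput constructor reference
    }

    @Override
    protected MyResponse doParseInstance(XContentParser parser) throws IOException {
        return MyResponse.fromXContent(parser);          // hypothetical static parser
    }
}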

View File

@ -53,58 +53,57 @@ import java.util.function.Supplier;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent;
public abstract class AbstractXContentTestCase<T extends ToXContent> extends OpenSearchTestCase {
protected static final int NUMBER_OF_TEST_RUNS = 20;
public static <T> XContentTester<T> xContentTester(
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Supplier<T> instanceSupplier,
CheckedBiConsumer<T, XContentBuilder, IOException> toXContent,
CheckedFunction<XContentParser, T, IOException> fromXContent) {
return new XContentTester<>(
createParser,
x -> instanceSupplier.get(),
(testInstance, xContentType) -> {
try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
toXContent.accept(testInstance, builder);
return BytesReference.bytes(builder);
}
},
fromXContent);
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Supplier<T> instanceSupplier,
CheckedBiConsumer<T, XContentBuilder, IOException> toXContent,
CheckedFunction<XContentParser, T, IOException> fromXContent
) {
return new XContentTester<>(createParser, x -> instanceSupplier.get(), (testInstance, xContentType) -> {
try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
toXContent.accept(testInstance, builder);
return BytesReference.bytes(builder);
}
}, fromXContent);
}
public static <T extends ToXContent> XContentTester<T> xContentTester(
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Supplier<T> instanceSupplier,
CheckedFunction<XContentParser, T, IOException> fromXContent) {
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Supplier<T> instanceSupplier,
CheckedFunction<XContentParser, T, IOException> fromXContent
) {
return xContentTester(createParser, instanceSupplier, ToXContent.EMPTY_PARAMS, fromXContent);
}
public static <T extends ToXContent> XContentTester<T> xContentTester(
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Supplier<T> instanceSupplier,
ToXContent.Params toXContentParams,
CheckedFunction<XContentParser, T, IOException> fromXContent) {
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Supplier<T> instanceSupplier,
ToXContent.Params toXContentParams,
CheckedFunction<XContentParser, T, IOException> fromXContent
) {
return new XContentTester<>(
createParser,
x -> instanceSupplier.get(),
(testInstance, xContentType) ->
XContentHelper.toXContent(testInstance, xContentType, toXContentParams, false),
fromXContent);
(testInstance, xContentType) -> XContentHelper.toXContent(testInstance, xContentType, toXContentParams, false),
fromXContent
);
}
public static <T extends ToXContent> XContentTester<T> xContentTester(
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Function<XContentType, T> instanceSupplier,
ToXContent.Params toXContentParams,
CheckedFunction<XContentParser, T, IOException> fromXContent) {
CheckedFunction<XContentParser, T, IOException> fromXContent
) {
return new XContentTester<>(
createParser,
instanceSupplier,
(testInstance, xContentType) ->
XContentHelper.toXContent(testInstance, xContentType, toXContentParams, false),
fromXContent);
(testInstance, xContentType) -> XContentHelper.toXContent(testInstance, xContentType, toXContentParams, false),
fromXContent
);
}
/**
@ -128,10 +127,11 @@ public abstract class AbstractXContentTestCase<T extends ToXContent> extends Ope
private boolean assertToXContentEquivalence = true;
private XContentTester(
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Function<XContentType, T> instanceSupplier,
CheckedBiFunction<T, XContentType, BytesReference, IOException> toXContent,
CheckedFunction<XContentParser, T, IOException> fromXContent) {
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
Function<XContentType, T> instanceSupplier,
CheckedBiFunction<T, XContentType, BytesReference, IOException> toXContent,
CheckedFunction<XContentParser, T, IOException> fromXContent
) {
this.createParser = createParser;
this.instanceSupplier = instanceSupplier;
this.toXContent = toXContent;
@ -143,16 +143,23 @@ public abstract class AbstractXContentTestCase<T extends ToXContent> extends Ope
XContentType xContentType = randomFrom(XContentType.values());
T testInstance = instanceSupplier.apply(xContentType);
BytesReference originalXContent = toXContent.apply(testInstance, xContentType);
BytesReference shuffledContent = insertRandomFieldsAndShuffle(originalXContent, xContentType, supportsUnknownFields,
shuffleFieldsExceptions, randomFieldsExcludeFilter, createParser);
BytesReference shuffledContent = insertRandomFieldsAndShuffle(
originalXContent,
xContentType,
supportsUnknownFields,
shuffleFieldsExceptions,
randomFieldsExcludeFilter,
createParser
);
XContentParser parser = createParser.apply(XContentFactory.xContent(xContentType), shuffledContent);
T parsed = fromXContent.apply(parser);
assertEqualsConsumer.accept(testInstance, parsed);
if (assertToXContentEquivalence) {
assertToXContentEquivalent(
toXContent.apply(testInstance, xContentType),
toXContent.apply(parsed, xContentType),
xContentType);
toXContent.apply(testInstance, xContentType),
toXContent.apply(parsed, xContentType),
xContentType
);
}
}
}
@ -189,24 +196,24 @@ public abstract class AbstractXContentTestCase<T extends ToXContent> extends Ope
}
public static <T extends ToXContent> void testFromXContent(
int numberOfTestRuns,
Supplier<T> instanceSupplier,
boolean supportsUnknownFields,
String[] shuffleFieldsExceptions,
Predicate<String> randomFieldsExcludeFilter,
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParserFunction,
CheckedFunction<XContentParser, T, IOException> fromXContent,
BiConsumer<T, T> assertEqualsConsumer,
boolean assertToXContentEquivalence,
ToXContent.Params toXContentParams) throws IOException {
xContentTester(createParserFunction, instanceSupplier, toXContentParams, fromXContent)
.numberOfTestRuns(numberOfTestRuns)
.supportsUnknownFields(supportsUnknownFields)
.shuffleFieldsExceptions(shuffleFieldsExceptions)
.randomFieldsExcludeFilter(randomFieldsExcludeFilter)
.assertEqualsConsumer(assertEqualsConsumer)
.assertToXContentEquivalence(assertToXContentEquivalence)
.test();
int numberOfTestRuns,
Supplier<T> instanceSupplier,
boolean supportsUnknownFields,
String[] shuffleFieldsExceptions,
Predicate<String> randomFieldsExcludeFilter,
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParserFunction,
CheckedFunction<XContentParser, T, IOException> fromXContent,
BiConsumer<T, T> assertEqualsConsumer,
boolean assertToXContentEquivalence,
ToXContent.Params toXContentParams
) throws IOException {
xContentTester(createParserFunction, instanceSupplier, toXContentParams, fromXContent).numberOfTestRuns(numberOfTestRuns)
.supportsUnknownFields(supportsUnknownFields)
.shuffleFieldsExceptions(shuffleFieldsExceptions)
.randomFieldsExcludeFilter(randomFieldsExcludeFilter)
.assertEqualsConsumer(assertEqualsConsumer)
.assertToXContentEquivalence(assertToXContentEquivalence)
.test();
}
/**
@ -214,9 +221,18 @@ public abstract class AbstractXContentTestCase<T extends ToXContent> extends Ope
* both for equality and asserts equality on the two queries.
*/
public final void testFromXContent() throws IOException {
testFromXContent(NUMBER_OF_TEST_RUNS, this::createTestInstance, supportsUnknownFields(), getShuffleFieldsExceptions(),
getRandomFieldsExcludeFilter(), this::createParser, this::parseInstance, this::assertEqualInstances,
assertToXContentEquivalence(), getToXContentParams());
testFromXContent(
NUMBER_OF_TEST_RUNS,
this::createTestInstance,
supportsUnknownFields(),
getShuffleFieldsExceptions(),
getRandomFieldsExcludeFilter(),
this::createParser,
this::parseInstance,
this::assertEqualInstances,
assertToXContentEquivalence(),
getToXContentParams()
);
}
/**
@ -274,9 +290,14 @@ public abstract class AbstractXContentTestCase<T extends ToXContent> extends Ope
return ToXContent.EMPTY_PARAMS;
}
static BytesReference insertRandomFieldsAndShuffle(BytesReference xContent, XContentType xContentType,
boolean supportsUnknownFields, String[] shuffleFieldsExceptions, Predicate<String> randomFieldsExcludeFilter,
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParserFunction) throws IOException {
static BytesReference insertRandomFieldsAndShuffle(
BytesReference xContent,
XContentType xContentType,
boolean supportsUnknownFields,
String[] shuffleFieldsExceptions,
Predicate<String> randomFieldsExcludeFilter,
CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParserFunction
) throws IOException {
BytesReference withRandomFields;
if (supportsUnknownFields) {
// add a few random fields to check that the parser is lenient on new fields
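Because the tester is exposed statically, it can also be driven directly from a test method instead of through a base class. A sketch under the assumption of a hypothetical MyPojo that implements ToXContent and provides a fromXContent parser:

public void testMyPojoFromXContent() throws IOException {
    xContentTester(this::createParser, MyPojoTests::randomMyPojo, MyPojo::fromXContent)
        .supportsUnknownFields(true)   // also inject random unknown fields before parsing
        .test();                       // runs NUMBER_OF_TEST_RUNS shuffled round-trips
}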

View File

@ -125,8 +125,15 @@ public class BackgroundIndexer implements AutoCloseable {
* @param autoStart set to true to start indexing as soon as all threads have been created.
* @param random random instance to use
*/
public BackgroundIndexer(final String index, final String type, final Client client, final int numOfDocs, final int writerCount,
boolean autoStart, Random random) {
public BackgroundIndexer(
final String index,
final String type,
final Client client,
final int numOfDocs,
final int writerCount,
boolean autoStart,
Random random
) {
if (random == null) {
random = RandomizedTest.getRandom();
@ -165,8 +172,9 @@ public class BackgroundIndexer implements AutoCloseable {
if (useAutoGeneratedIDs) {
bulkRequest.add(client.prepareIndex(index, type).setSource(generateSource(id, threadRandom)));
} else {
bulkRequest.add(client.prepareIndex(index, type, Long.toString(id))
.setSource(generateSource(id, threadRandom)));
bulkRequest.add(
client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom))
);
}
}
try {
@ -194,7 +202,9 @@ public class BackgroundIndexer implements AutoCloseable {
if (useAutoGeneratedIDs) {
try {
IndexResponse indexResponse = client.prepareIndex(index, type)
.setTimeout(timeout).setSource(generateSource(id, threadRandom)).get();
.setTimeout(timeout)
.setSource(generateSource(id, threadRandom))
.get();
boolean add = ids.add(indexResponse.getId());
assert add : "ID: " + indexResponse.getId() + " already used";
} catch (Exception e) {
@ -205,7 +215,9 @@ public class BackgroundIndexer implements AutoCloseable {
} else {
try {
IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id))
.setTimeout(timeout).setSource(generateSource(id, threadRandom)).get();
.setTimeout(timeout)
.setSource(generateSource(id, threadRandom))
.get();
boolean add = ids.add(indexResponse.getId());
assert add : "ID: " + indexResponse.getId() + " already used";
} catch (Exception e) {
@ -221,8 +233,9 @@ public class BackgroundIndexer implements AutoCloseable {
trackFailure(e);
final long docId = id;
logger.warn(
(Supplier<?>)
() -> new ParameterizedMessage("**** failed indexing thread {} on doc id {}", indexerId, docId), e);
(Supplier<?>) () -> new ParameterizedMessage("**** failed indexing thread {} on doc id {}", indexerId, docId),
e
);
} finally {
stopLatch.countDown();
}
@ -254,10 +267,7 @@ public class BackgroundIndexer implements AutoCloseable {
text.append(" ").append(RandomStrings.randomRealisticUnicodeOfCodepointLength(random, tokenLength));
}
XContentBuilder builder = XContentFactory.smileBuilder();
builder.startObject().field("test", "value" + id)
.field("text", text.toString())
.field("id", id)
.endObject();
builder.startObject().field("test", "value" + id).field("text", text.toString()).field("id", id).endObject();
return builder;
}
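Using only the constructor shown above, a test can keep concurrent indexing pressure on a cluster for the duration of a block; close() (via AutoCloseable) stops the writer threads. A hedged usage sketch, with "test-idx" as a placeholder index name:

try (BackgroundIndexer indexer = new BackgroundIndexer("test-idx", "_doc", client(), 1000, 4, true, random())) {
    // ... relocate shards, restart nodes, etc. while the four writers index 1000 docs ...
}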

View File

@ -62,8 +62,11 @@ import static junit.framework.TestCase.fail;
public class ClusterServiceUtils {
public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) {
MasterService masterService = new MasterService(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_master_node").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool);
MasterService masterService = new MasterService(
Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_master_node").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool
);
AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
clusterStateRef.set(event.state());
@ -76,19 +79,19 @@ public class ClusterServiceUtils {
public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) {
ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName()))
.nodes(DiscoveryNodes.builder()
.add(localNode)
.localNodeId(localNode.getId())
.masterNodeId(localNode.getId()))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
return createMasterService(threadPool, initialClusterState);
}
public static void setState(ClusterApplierService executor, ClusterState clusterState) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Exception> exception = new AtomicReference<>();
executor.onNewClusterState("test setting state",
() -> ClusterState.builder(clusterState).version(clusterState.version() + 1).build(), new ClusterApplyListener() {
executor.onNewClusterState(
"test setting state",
() -> ClusterState.builder(clusterState).version(clusterState.version() + 1).build(),
new ClusterApplyListener() {
@Override
public void onSuccess(String source) {
latch.countDown();
@ -99,7 +102,8 @@ public class ClusterServiceUtils {
exception.set(e);
latch.countDown();
}
});
}
);
try {
latch.await();
if (exception.get() != null) {
@ -137,8 +141,13 @@ public class ClusterServiceUtils {
}
public static ClusterService createClusterService(ThreadPool threadPool) {
DiscoveryNode discoveryNode = new DiscoveryNode("node", OpenSearchTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(),
DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT);
DiscoveryNode discoveryNode = new DiscoveryNode(
"node",
OpenSearchTestCase.buildNewFakeTransportAddress(),
Collections.emptyMap(),
DiscoveryNodeRole.BUILT_IN_ROLES,
Version.CURRENT
);
return createClusterService(threadPool, discoveryNode);
}
@ -147,21 +156,15 @@ public class ClusterServiceUtils {
}
public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode, ClusterSettings clusterSettings) {
Settings settings = Settings.builder()
.put("node.name", "test")
.put("cluster.name", "ClusterServiceTests")
.build();
Settings settings = Settings.builder().put("node.name", "test").put("cluster.name", "ClusterServiceTests").build();
ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool);
clusterService.setNodeConnectionsService(createNoOpNodeConnectionsService());
ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName()))
.nodes(DiscoveryNodes.builder()
.add(localNode)
.localNodeId(localNode.getId())
.masterNodeId(localNode.getId()))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
.build();
clusterService.getClusterApplierService().setInitialState(initialClusterState);
clusterService.getMasterService().setClusterStatePublisher(
createClusterStatePublisher(clusterService.getClusterApplierService()));
clusterService.getMasterService().setClusterStatePublisher(createClusterStatePublisher(clusterService.getClusterApplierService()));
clusterService.getMasterService().setClusterStateSupplier(clusterService.getClusterApplierService()::state);
clusterService.start();
return clusterService;
@ -183,19 +186,20 @@ public class ClusterServiceUtils {
}
public static ClusterStatePublisher createClusterStatePublisher(ClusterApplier clusterApplier) {
return (event, publishListener, ackListener) ->
clusterApplier.onNewClusterState("mock_publish_to_self[" + event.source() + "]", () -> event.state(),
new ClusterApplyListener() {
@Override
public void onSuccess(String source) {
publishListener.onResponse(null);
}
@Override
public void onFailure(String source, Exception e) {
publishListener.onFailure(e);
}
return (event, publishListener, ackListener) -> clusterApplier.onNewClusterState(
"mock_publish_to_self[" + event.source() + "]",
() -> event.state(),
new ClusterApplyListener() {
@Override
public void onSuccess(String source) {
publishListener.onResponse(null);
}
@Override
public void onFailure(String source, Exception e) {
publishListener.onFailure(e);
}
}
);
}
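In unit tests these helpers are typically paired with a TestThreadPool and torn down afterwards. A minimal sketch of that lifecycle (the test body is a placeholder):

ThreadPool threadPool = new TestThreadPool(getTestName());
ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
try {
    // ... register appliers/listeners, publish states via ClusterServiceUtils.setState(...) ...
} finally {
    clusterService.close();                                  // stop master + applier services
    ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);  // then the shared thread pool
}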

View File

@ -56,25 +56,21 @@ import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public final class CorruptionUtils {
private static final Logger logger = LogManager.getLogger(CorruptionUtils.class);
private CorruptionUtils() {}
public static void corruptIndex(Random random, Path indexPath, boolean corruptSegments) throws IOException {
// corrupt files
final Path[] filesToCorrupt =
Files.walk(indexPath)
.filter(p -> {
final String name = p.getFileName().toString();
boolean segmentFile = name.startsWith("segments_") || name.endsWith(".si");
return Files.isRegularFile(p)
&& name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS
&& IndexWriter.WRITE_LOCK_NAME.equals(name) == false
&& (corruptSegments ? segmentFile : segmentFile == false);
}
)
.toArray(Path[]::new);
final Path[] filesToCorrupt = Files.walk(indexPath).filter(p -> {
final String name = p.getFileName().toString();
boolean segmentFile = name.startsWith("segments_") || name.endsWith(".si");
return Files.isRegularFile(p)
&& name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS
&& IndexWriter.WRITE_LOCK_NAME.equals(name) == false
&& (corruptSegments ? segmentFile : segmentFile == false);
}).toArray(Path[]::new);
corruptFile(random, filesToCorrupt);
}
@ -115,9 +111,11 @@ public final class CorruptionUtils {
msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ");
msg.append(dir.fileLength(fileToCorrupt.getFileName().toString()));
logger.info("Checksum {}", msg);
assumeTrue("Checksum collision - " + msg.toString(),
checksumAfterCorruption != checksumBeforeCorruption // collision
|| actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
assumeTrue(
"Checksum collision - " + msg.toString(),
checksumAfterCorruption != checksumBeforeCorruption // collision
|| actualChecksumAfterCorruption != checksumBeforeCorruption
); // checksum corrupted
assertThat("no file corrupted", fileToCorrupt, notNullValue());
}
}
@ -138,9 +136,13 @@ public final class CorruptionUtils {
// rewrite
channel.position(filePointer);
channel.write(bb);
logger.info("Corrupting file -- flipping at position {} from {} to {} file: {}", filePointer,
Integer.toHexString(oldValue), Integer.toHexString(newValue), path.getFileName());
logger.info(
"Corrupting file -- flipping at position {} from {} to {} file: {}",
filePointer,
Integer.toHexString(oldValue),
Integer.toHexString(newValue),
path.getFileName()
);
}
}
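Matching the signature above, a corruption test would point this at a closed shard copy and then assert that recovery or checksum verification notices the damage; a one-line hedged sketch (shardIndexPath is hypothetical):

CorruptionUtils.corruptIndex(random(), shardIndexPath, false); // false: corrupt a random non-segment file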

View File

@ -74,8 +74,8 @@ public final class DiffableTestUtils {
/**
* Simulates sending diffs over the wire
*/
public static <T extends Writeable> T copyInstance(T diffs, NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader) throws IOException {
public static <T extends Writeable> T copyInstance(T diffs, NamedWriteableRegistry namedWriteableRegistry, Reader<T> reader)
throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
diffs.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
@ -88,11 +88,13 @@ public final class DiffableTestUtils {
* Tests making random changes to an object, calculating diffs for these changes, sending these
* diffs over the wire and applying them on the other side.
*/
public static <T extends Diffable<T>> void testDiffableSerialization(Supplier<T> testInstance,
Function<T, T> modifier,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader,
Reader<Diff<T>> diffReader) throws IOException {
public static <T extends Diffable<T>> void testDiffableSerialization(
Supplier<T> testInstance,
Function<T, T> modifier,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader,
Reader<Diff<T>> diffReader
) throws IOException {
T remoteInstance = testInstance.get();
T localInstance = assertSerialization(remoteInstance, namedWriteableRegistry, reader);
for (int runs = 0; runs < NUMBER_OF_DIFF_TEST_RUNS; runs++) {
@ -107,8 +109,11 @@ public final class DiffableTestUtils {
/**
* Asserts that testInstance can be correctly serialized and deserialized.
*/
public static <T extends Writeable> T assertSerialization(T testInstance, NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader) throws IOException {
public static <T extends Writeable> T assertSerialization(
T testInstance,
NamedWriteableRegistry namedWriteableRegistry,
Reader<T> reader
) throws IOException {
T deserializedInstance = copyInstance(testInstance, namedWriteableRegistry, reader);
assertEquals(testInstance, deserializedInstance);
assertEquals(testInstance.hashCode(), deserializedInstance.hashCode());
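The same helper can be invoked without the abstract test case; the arguments below mirror the signature shown above, with the MySetting factories as hypothetical stand-ins:

DiffableTestUtils.testDiffableSerialization(
    MySettingTests::randomMySetting,                        // hypothetical random factory
    MySettingTests::mutateMySetting,                        // hypothetical mutation producing a diff
    new NamedWriteableRegistry(Collections.emptyList()),
    MySetting::new,
    in -> AbstractDiffable.readDiffFrom(MySetting::new, in)
);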

View File

@ -45,6 +45,5 @@ public class DummyShardLock extends ShardLock {
}
@Override
protected void closeInternal() {
}
protected void closeInternal() {}
}

Some files were not shown because too many files have changed in this diff.