Merge branch 'master' into pr/16598-register-filter-settings
# Conflicts:
#	core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
#	core/src/main/java/org/elasticsearch/common/settings/Setting.java
#	core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
commit c11cf3bf1f
@@ -166,7 +166,7 @@ public class CommonStats implements Streamable, ToXContent {
                completion = indexShard.completionStats(flags.completionDataFields());
                break;
            case Segments:
                segments = indexShard.segmentStats();
                segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
                break;
            case Percolate:
                percolate = indexShard.percolateStats();
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -38,6 +39,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    private String[] groups = null;
    private String[] fieldDataFields = null;
    private String[] completionDataFields = null;
    private boolean includeSegmentFileSizes = false;

    /**

@@ -62,6 +64,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        groups = null;
        fieldDataFields = null;
        completionDataFields = null;
        includeSegmentFileSizes = false;
        return this;
    }

@@ -74,6 +77,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        groups = null;
        fieldDataFields = null;
        completionDataFields = null;
        includeSegmentFileSizes = false;
        return this;
    }

@@ -137,6 +141,15 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        return this.completionDataFields;
    }

    public CommonStatsFlags includeSegmentFileSizes(boolean includeSegmentFileSizes) {
        this.includeSegmentFileSizes = includeSegmentFileSizes;
        return this;
    }

    public boolean includeSegmentFileSizes() {
        return this.includeSegmentFileSizes;
    }

    public boolean isSet(Flag flag) {
        return flags.contains(flag);
    }

@@ -177,6 +190,9 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        out.writeStringArrayNullable(groups);
        out.writeStringArrayNullable(fieldDataFields);
        out.writeStringArrayNullable(completionDataFields);
        if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
            out.writeBoolean(includeSegmentFileSizes);
        }
    }

    @Override

@@ -192,6 +208,11 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        groups = in.readStringArray();
        fieldDataFields = in.readStringArray();
        completionDataFields = in.readStringArray();
        if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
            includeSegmentFileSizes = in.readBoolean();
        } else {
            includeSegmentFileSizes = false;
        }
    }

    @Override
@@ -265,6 +265,15 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
        return flags.isSet(Flag.Recovery);
    }

    public boolean includeSegmentFileSizes() {
        return flags.includeSegmentFileSizes();
    }

    public IndicesStatsRequest includeSegmentFileSizes(boolean includeSegmentFileSizes) {
        flags.includeSegmentFileSizes(includeSegmentFileSizes);
        return this;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
@@ -166,4 +166,9 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
        request.recovery(recovery);
        return this;
    }

    public IndicesStatsRequestBuilder setIncludeSegmentFileSizes(boolean includeSegmentFileSizes) {
        request.includeSegmentFileSizes(includeSegmentFileSizes);
        return this;
    }
}
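As a usage note (not part of the commit): a client could opt into the new per-file segment statistics roughly as in the hedged sketch below. The index name and the surrounding helper class are placeholders, and the builder calls other than setIncludeSegmentFileSizes are assumed from the existing stats API.

    import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
    import org.elasticsearch.client.Client;

    final class SegmentFileSizesStatsExample {
        // Hypothetical helper, not from the diff: fetch segment stats for one index,
        // including the per-extension file sizes enabled by setIncludeSegmentFileSizes.
        static IndicesStatsResponse fetchSegmentStats(Client client, String index) {
            return client.admin().indices()
                    .prepareStats(index)
                    .clear()                           // start from an empty flag set
                    .setSegments(true)                 // segment stats must be requested explicitly
                    .setIncludeSegmentFileSizes(true)  // flag introduced by this change
                    .get();
        }
    }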
@@ -144,6 +144,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
        }
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
        }
        if (request.completion()) {
            flags.set(CommonStatsFlags.Flag.Completion);
@@ -60,9 +60,11 @@ import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;

/**
 *

@@ -74,27 +76,41 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
    private final ClusterService clusterService;
    private final TransportShardBulkAction shardBulkAction;
    private final TransportCreateIndexAction createIndexAction;
    private final LongSupplier relativeTimeProvider;

    @Inject
    public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
                               TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex) {
        this(settings, threadPool, transportService, clusterService,
                shardBulkAction, createIndexAction,
                actionFilters, indexNameExpressionResolver,
                autoCreateIndex,
                System::nanoTime);
    }

    public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
                               TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
        super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
        Objects.requireNonNull(relativeTimeProvider);
        this.clusterService = clusterService;
        this.shardBulkAction = shardBulkAction;
        this.createIndexAction = createIndexAction;

        this.autoCreateIndex = autoCreateIndex;
        this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
        this.relativeTimeProvider = relativeTimeProvider;
    }

    @Override
    protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
        final long startTime = System.currentTimeMillis();
        final long startTime = relativeTime();
        final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());

        if (autoCreateIndex.needToCheck()) {
        if (needToCheck()) {
            // Keep track of all unique indices and all unique types per index for the create index requests:
            final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
            for (ActionRequest request : bulkRequest.requests) {

@@ -113,7 +129,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            ClusterState state = clusterService.state();
            for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
                final String index = entry.getKey();
                if (autoCreateIndex.shouldAutoCreate(index, state)) {
                if (shouldAutoCreate(index, state)) {
                    CreateIndexRequest createIndexRequest = new CreateIndexRequest();
                    createIndexRequest.index(index);
                    for (String type : entry.getValue()) {

@@ -164,6 +180,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        }
    }

    boolean needToCheck() {
        return autoCreateIndex.needToCheck();
    }

    boolean shouldAutoCreate(String index, ClusterState state) {
        return autoCreateIndex.shouldAutoCreate(index, state);
    }

    private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Throwable e) {
        if (request instanceof IndexRequest) {
            IndexRequest indexRequest = (IndexRequest) request;

@@ -196,15 +220,15 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
     * @see #doExecute(BulkRequest, org.elasticsearch.action.ActionListener)
     */
    public void executeBulk(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
        final long startTimeNanos = System.nanoTime();
        final long startTimeNanos = relativeTime();
        executeBulk(bulkRequest, startTimeNanos, listener, new AtomicArray<>(bulkRequest.requests.size()));
    }

    private long buildTookInMillis(long startTimeNanos) {
        return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNanos);
        return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
    }

    private void executeBulk(final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
    void executeBulk(final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
        final ClusterState clusterState = clusterService.state();
        // TODO use timeout to wait here if its blocked...
        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);

@@ -398,7 +422,6 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        return false;
    }

    private static class ConcreteIndices {
        private final ClusterState state;
        private final IndexNameExpressionResolver indexNameExpressionResolver;

@@ -422,4 +445,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            return concreteIndex;
        }
    }

    private long relativeTime() {
        return relativeTimeProvider.getAsLong();
    }

}
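The constructor change above injects the clock as a LongSupplier so that elapsed-time measurement can be controlled in tests. The following standalone sketch is illustrative only (it is not code from the commit) and shows the pattern in isolation:

    import java.util.concurrent.TimeUnit;
    import java.util.function.LongSupplier;

    // Minimal sketch of the injectable-clock pattern used by TransportBulkAction:
    // production code passes System::nanoTime, tests can pass a fake supplier.
    final class RelativeClock {
        private final LongSupplier relativeTimeProvider;

        RelativeClock(LongSupplier relativeTimeProvider) {
            this.relativeTimeProvider = relativeTimeProvider;
        }

        long relativeTime() {
            return relativeTimeProvider.getAsLong();
        }

        long tookInMillis(long startTimeNanos) {
            // same computation as buildTookInMillis above
            return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
        }
    }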
@@ -110,7 +110,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
    }

    private class AsyncAction {
    class AsyncAction {

        private final NodesRequest request;
        private final String[] nodesIds;

@@ -120,7 +120,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
        private final AtomicInteger counter = new AtomicInteger();
        private final Task task;

        private AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
        AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
            this.task = task;
            this.request = request;
            this.listener = listener;

@@ -135,7 +135,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
            this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
        }

        private void start() {
        void start() {
            if (nodesIds.length == 0) {
                // nothing to notify
                threadPool.generic().execute(new Runnable() {

@@ -158,11 +158,6 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
                try {
                    if (node == null) {
                        onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
                    } else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
                        // the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
                        // we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
                        // those (and they randomize the client node usage, so tricky to find when)
                        onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
                    } else {
                        ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
                        if (task != null) {
@@ -235,12 +235,6 @@ public abstract class TransportTasksAction<
            try {
                if (node == null) {
                    onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
                } else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
                    // the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
                    // we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we
                    // need to fix
                    // those (and they randomize the client node usage, so tricky to find when)
                    onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
                } else {
                    NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
                    nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
@@ -22,7 +22,6 @@ package org.elasticsearch.bootstrap;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.PidFile;

@@ -33,8 +32,6 @@ import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;

@@ -42,17 +39,12 @@ import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.transport.TransportSettings;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;

import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;

@@ -142,6 +134,8 @@ final class Bootstrap {
            // we've already logged this.
        }

        JNANatives.trySetMaxNumberOfThreads();

        // init lucene random seed. it will use /dev/urandom where available:
        StringHelper.randomId();
    }

@@ -189,7 +183,8 @@ final class Bootstrap {
                .put(settings)
                .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
                .build();
        enforceOrLogLimits(nodeSettings);

        BootstrapCheck.check(nodeSettings);

        node = new Node(nodeSettings);
    }

@@ -349,50 +344,4 @@ final class Bootstrap {
        }
    }

    static final Set<Setting> ENFORCE_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
            TransportSettings.BIND_HOST,
            TransportSettings.HOST,
            TransportSettings.PUBLISH_HOST,
            NetworkService.GLOBAL_NETWORK_HOST_SETTING,
            NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
            NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
    )));

    private static boolean enforceLimits(Settings settings) {
        if (Build.CURRENT.isSnapshot()) {
            return false;
        }
        for (Setting setting : ENFORCE_SETTINGS) {
            if (setting.exists(settings)) {
                return true;
            }
        }
        return false;
    }

    static void enforceOrLogLimits(Settings settings) { // pkg private for testing
        /* We enforce limits once any network host is configured. In this case we assume the node is running in production
         * and all production limit checks must pass. This should be extended as we go to settings like:
         * - discovery.zen.minimum_master_nodes
         * - discovery.zen.ping.unicast.hosts is set if we use zen disco
         * - ensure we can write in all data directories
         * - fail if mlockall failed and was configured
         * - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
         * - fail if the default cluster.name is used, if this is setup on network a real clustername should be used?*/
        final boolean enforceLimits = enforceLimits(settings);
        final ESLogger logger = Loggers.getLogger(Bootstrap.class);
        final long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
        if (maxFileDescriptorCount != -1) {
            final int fileDescriptorCountThreshold = (1 << 16);
            if (maxFileDescriptorCount < fileDescriptorCountThreshold) {
                if (enforceLimits){
                    throw new IllegalStateException("max file descriptors [" + maxFileDescriptorCount
                            + "] for elasticsearch process likely too low, increase it to at least [" + fileDescriptorCountThreshold +"]");
                }
                logger.warn(
                        "max file descriptors [{}] for elasticsearch process likely too low, consider increasing to at least [{}]",
                        maxFileDescriptorCount, fileDescriptorCountThreshold);
            }
        }
    }
}
@@ -0,0 +1,252 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.transport.TransportSettings;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;

/**
 * We enforce limits once any network host is configured. In this case we assume the node is running in production
 * and all production limit checks must pass. This should be extended as we go to settings like:
 * - discovery.zen.minimum_master_nodes
 * - discovery.zen.ping.unicast.hosts is set if we use zen disco
 * - ensure we can write in all data directories
 * - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
 * - fail if the default cluster.name is used, if this is setup on network a real clustername should be used?
 */
final class BootstrapCheck {

    private BootstrapCheck() {
    }

    /**
     * checks the current limits against the snapshot or release build
     * checks
     *
     * @param settings the current node settings
     */
    public static void check(final Settings settings) {
        check(enforceLimits(settings), checks(settings));
    }

    /**
     * executes the provided checks and fails the node if
     * enforceLimits is true, otherwise logs warnings
     *
     * @param enforceLimits true if the checks should be enforced or
     *                      warned
     * @param checks        the checks to execute
     */
    // visible for testing
    static void check(final boolean enforceLimits, final List<Check> checks) {
        final ESLogger logger = Loggers.getLogger(BootstrapCheck.class);

        for (final Check check : checks) {
            final boolean fail = check.check();
            if (fail) {
                if (enforceLimits) {
                    throw new RuntimeException(check.errorMessage());
                } else {
                    logger.warn(check.errorMessage());
                }
            }
        }
    }

    /**
     * The set of settings such that if any are set for the node, then
     * the checks are enforced
     *
     * @return the enforcement settings
     */
    // visible for testing
    static Set<Setting> enforceSettings() {
        return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
                TransportSettings.BIND_HOST,
                TransportSettings.HOST,
                TransportSettings.PUBLISH_HOST,
                NetworkService.GLOBAL_NETWORK_HOST_SETTING,
                NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
                NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
        )));
    }

    /**
     * Tests if the checks should be enforced
     *
     * @param settings the current node settings
     * @return true if the checks should be enforced
     */
    // visible for testing
    static boolean enforceLimits(final Settings settings) {
        return enforceSettings().stream().anyMatch(s -> s.exists(settings));
    }

    // the list of checks to execute
    private static List<Check> checks(final Settings settings) {
        final List<Check> checks = new ArrayList<>();
        final FileDescriptorCheck fileDescriptorCheck
            = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
        checks.add(fileDescriptorCheck);
        checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
        if (Constants.LINUX) {
            checks.add(new MaxNumberOfThreadsCheck());
        }
        return Collections.unmodifiableList(checks);
    }

    /**
     * Encapsulates a limit check
     */
    interface Check {

        /**
         * test if the node fails the check
         *
         * @return true if the node failed the check
         */
        boolean check();

        /**
         * the message for a failed check
         *
         * @return the error message on check failure
         */
        String errorMessage();

    }

    static class OsXFileDescriptorCheck extends FileDescriptorCheck {

        public OsXFileDescriptorCheck() {
            // see constant OPEN_MAX defined in
            // /usr/include/sys/syslimits.h on OS X and its use in JVM
            // initialization in int os:init_2(void) defined in the JVM
            // code for BSD (contains OS X)
            super(10240);
        }

    }

    // visible for testing
    static class FileDescriptorCheck implements Check {

        private final int limit;

        FileDescriptorCheck() {
            this(1 << 16);
        }

        protected FileDescriptorCheck(final int limit) {
            if (limit <= 0) {
                throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
            }
            this.limit = limit;
        }

        public final boolean check() {
            final long maxFileDescriptorCount = getMaxFileDescriptorCount();
            return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
        }

        @Override
        public final String errorMessage() {
            return String.format(
                    Locale.ROOT,
                    "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
                    getMaxFileDescriptorCount(),
                    limit
            );
        }

        // visible for testing
        long getMaxFileDescriptorCount() {
            return ProcessProbe.getInstance().getMaxFileDescriptorCount();
        }

    }

    // visible for testing
    static class MlockallCheck implements Check {

        private final boolean mlockallSet;

        public MlockallCheck(final boolean mlockAllSet) {
            this.mlockallSet = mlockAllSet;
        }

        @Override
        public boolean check() {
            return mlockallSet && !isMemoryLocked();
        }

        @Override
        public String errorMessage() {
            return "memory locking requested for elasticsearch process but memory is not locked";
        }

        // visible for testing
        boolean isMemoryLocked() {
            return Natives.isMemoryLocked();
        }

    }

    static class MaxNumberOfThreadsCheck implements Check {

        private final long maxNumberOfThreadsThreshold = 1 << 15;

        @Override
        public boolean check() {
            return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
        }

        @Override
        public String errorMessage() {
            return String.format(
                    Locale.ROOT,
                    "max number of threads [%d] for user [%s] likely too low, increase to at least [%d]",
                    getMaxNumberOfThreads(),
                    BootstrapInfo.getSystemProperties().get("user.name"),
                    maxNumberOfThreadsThreshold);
        }

        // visible for testing
        long getMaxNumberOfThreads() {
            return JNANatives.MAX_NUMBER_OF_THREADS;
        }

    }

}
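For illustration only (this class is not in the commit): a further check could be written against the Check interface defined above. Because BootstrapCheck and Check are package-private, such a class would have to live in org.elasticsearch.bootstrap; the cluster-name rule used here is a hypothetical example taken from the class Javadoc's wish list.

    package org.elasticsearch.bootstrap;

    // Hypothetical additional check, sketched against the Check interface above.
    final class DefaultClusterNameCheck implements BootstrapCheck.Check {

        private final String clusterName;

        DefaultClusterNameCheck(final String clusterName) {
            this.clusterName = clusterName;
        }

        @Override
        public boolean check() {
            // check() returns true when the node fails the check
            return "elasticsearch".equals(clusterName);
        }

        @Override
        public String errorMessage() {
            return "the default cluster name [elasticsearch] should not be used on a production network";
        }
    }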
@@ -48,6 +48,9 @@ class JNANatives {
    // Set to true, in case policy can be applied to all threads of the process (even existing ones)
    // otherwise they are only inherited for new threads (ES app threads)
    static boolean LOCAL_SECCOMP_ALL = false;
    // set to the maximum number of threads that can be created for
    // the user ID that owns the running Elasticsearch process
    static long MAX_NUMBER_OF_THREADS = -1;

    static void tryMlockall() {
        int errno = Integer.MIN_VALUE;

@@ -103,13 +106,29 @@ class JNANatives {
        }
    }

    static void trySetMaxNumberOfThreads() {
        if (Constants.LINUX) {
            // this is only valid on Linux and the value *is* different on OS X
            // see /usr/include/sys/resource.h on OS X
            // on Linux the resource RLIMIT_NPROC means *the number of threads*
            // this is in opposition to BSD-derived OSes
            final int rlimit_nproc = 6;

            final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit();
            if (JNACLibrary.getrlimit(rlimit_nproc, rlimit) == 0) {
                MAX_NUMBER_OF_THREADS = rlimit.rlim_cur.longValue();
            } else {
                logger.warn("unable to retrieve max number of threads [" + JNACLibrary.strerror(Native.getLastError()) + "]");
            }
        }
    }

    static String rlimitToString(long value) {
        assert Constants.LINUX || Constants.MAC_OS_X;
        if (value == JNACLibrary.RLIM_INFINITY) {
            return "unlimited";
        } else {
            // TODO, on java 8 use Long.toUnsignedString, since that's what it is.
            return Long.toString(value);
            return Long.toUnsignedString(value);
        }
    }
@@ -209,16 +209,6 @@ public class DiscoveryNode implements Streamable, ToXContent {
        this.version = version;
    }

    /**
     * Should this node form a connection to the provided node.
     */
    public boolean shouldConnectTo(DiscoveryNode otherNode) {
        if (clientNode() && otherNode.clientNode()) {
            return false;
        }
        return true;
    }

    /**
     * The address that the node can be communicated with.
     */
@@ -574,9 +574,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

            // TODO, do this in parallel (and wait)
            for (DiscoveryNode node : nodesDelta.addedNodes()) {
                if (!nodeRequiresConnection(node)) {
                    continue;
                }
                try {
                    transportService.connectToNode(node);
                } catch (Throwable e) {

@@ -828,9 +825,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                if (lifecycle.stoppedOrClosed()) {
                    return;
                }
                if (!nodeRequiresConnection(node)) {
                    continue;
                }
                if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
                    if (!transportService.nodeConnected(node)) {
                        try {

@@ -877,10 +871,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        return Strings.randomBase64UUID(random);
    }

    private boolean nodeRequiresConnection(DiscoveryNode node) {
        return localNode().shouldConnectTo(node);
    }

    private static class LocalNodeMasterListeners implements ClusterStateListener {

        private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<>();
@@ -34,7 +34,7 @@ public abstract class ESLoggerFactory {
    public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
        new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, SettingsProperty.ClusterScope);
    public static final Setting<LogLevel> LOG_LEVEL_SETTING =
        Setting.dynamicKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
        Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
            SettingsProperty.Dynamic, SettingsProperty.ClusterScope);

    public static ESLogger getLogger(String prefix, String name) {
@@ -42,6 +42,7 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity.SimWeight;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;

@@ -186,9 +187,13 @@ public final class AllTermQuery extends Query {
                float boost;
                if (payload == null) {
                    boost = 1;
                } else {
                    assert payload.length == 4;
                } else if (payload.length == 1) {
                    boost = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
                } else if (payload.length == 4) {
                    // TODO: for bw compat only, remove this in 6.0
                    boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
                } else {
                    throw new IllegalStateException("Payloads are expected to have a length of 1 or 4 but got: " + payload);
                }
                payloadBoost += boost;
            }
@@ -25,11 +25,10 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;

import java.io.IOException;

import static org.apache.lucene.analysis.payloads.PayloadHelper.encodeFloat;

/**
 *
 */

@@ -39,7 +38,7 @@ public final class AllTokenStream extends TokenFilter {
        return new AllTokenStream(analyzer.tokenStream(allFieldName, allEntries), allEntries);
    }

    private final BytesRef payloadSpare = new BytesRef(new byte[4]);
    private final BytesRef payloadSpare = new BytesRef(new byte[1]);

    private final AllEntries allEntries;

@@ -64,7 +63,7 @@ public final class AllTokenStream extends TokenFilter {
        }
        final float boost = allEntries.boost(offsetAttribute.startOffset());
        if (boost != 1.0f) {
            encodeFloat(boost, payloadSpare.bytes, payloadSpare.offset);
            payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
            payloadAttribute.setPayload(payloadSpare);
        } else {
            payloadAttribute.setPayload(null);
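The two hunks above switch the _all-field boost payload from a 4-byte float to a single SmallFloat-encoded byte. The standalone sketch below (not from the commit) shows the lossy round trip using the same Lucene SmallFloat methods that the diff itself calls:

    import org.apache.lucene.util.SmallFloat;

    final class BoostPayloadEncodingDemo {
        public static void main(String[] args) {
            float boost = 2.5f;
            byte encoded = SmallFloat.floatToByte315(boost);    // single payload byte, as written by AllTokenStream
            float decoded = SmallFloat.byte315ToFloat(encoded);  // approximate boost, as read by AllTermQuery
            System.out.println(boost + " -> byte " + encoded + " -> " + decoded);
        }
    }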
@@ -76,7 +76,7 @@ public class FilterableTermsEnum extends TermsEnum {
        this.docsEnumFlag = docsEnumFlag;
        if (filter == null) {
            // Important - need to use the doc count that includes deleted docs
            // or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
            // or we have this issue: https://github.com/elastic/elasticsearch/issues/7951
            numDocs = reader.maxDoc();
        }
        List<LeafReaderContext> leaves = reader.leaves();
@@ -296,12 +296,25 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        }
        for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
            if (entry.getValue().match(key)) {
                assert assertMatcher(key, 1);
                return entry.getValue().getConcreteSetting(key);
            }
        }
        return null;
    }

    private boolean assertMatcher(String key, int numComplexMatchers) {
        List<Setting<?>> list = new ArrayList<>();
        for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
            if (entry.getValue().match(key)) {
                list.add(entry.getValue().getConcreteSetting(key));
            }
        }
        assert list.size() == numComplexMatchers : "Expected " + numComplexMatchers + " complex matchers to match key [" +
            key + "] but got: " + list.toString();
        return true;
    }

    /**
     * Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
     */
@@ -113,7 +113,7 @@ public class Setting<T> extends ToXContentToBytes {
    private static final ESLogger logger = Loggers.getLogger(Setting.class);
    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);

    private final String key;
    private final Key key;
    protected final Function<Settings, String> defaultValue;
    private final Function<String, T> parser;
    private final EnumSet<SettingsProperty> properties;

@@ -125,7 +125,7 @@ public class Setting<T> extends ToXContentToBytes {
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param properties properties for this setting like scope, filtering...
     */
    public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, SettingsProperty... properties) {
    public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, SettingsProperty... properties) {
        assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null";
        this.key = key;
        this.defaultValue = defaultValue;

@@ -160,6 +160,18 @@ public class Setting<T> extends ToXContentToBytes {
        this(key, s -> defaultValue, parser, properties);
    }

    /**
     * Creates a new Setting instance
     * @param key the settings key for this setting.
     * @param defaultValue a default value function that returns the default values string representation.
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param dynamic true iff this setting can be dynamically updateable
     * @param scope the scope of this setting
     */
    public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        this(new SimpleKey(key), defaultValue, parser, dynamic, scope);
    }

    /**
     * Creates a new Setting instance
     * @param key the settings key for this setting.

@@ -179,6 +191,13 @@ public class Setting<T> extends ToXContentToBytes {
     * @see #isGroupSetting()
     */
    public final String getKey() {
        return key.toString();
    }

    /**
     * Returns the original representation of a setting key.
     */
    public final Key getRawKey() {
        return key;
    }

@@ -265,7 +284,7 @@ public class Setting<T> extends ToXContentToBytes {
     * Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
     */
    public final boolean exists(Settings settings) {
        return settings.get(key) != null;
        return settings.get(getKey()) != null;
    }

    /**

@@ -298,7 +317,7 @@ public class Setting<T> extends ToXContentToBytes {
            deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " +
                "See the breaking changes lists in the documentation for details", getKey());
        }
        return settings.get(key, defaultValue.apply(settings));
        return settings.get(getKey(), defaultValue.apply(settings));
    }

    /**

@@ -306,14 +325,14 @@ public class Setting<T> extends ToXContentToBytes {
     * given key is part of the settings group.
     * @see #isGroupSetting()
     */
    public boolean match(String toTest) {
        return key.equals(toTest);
    public final boolean match(String toTest) {
        return key.match(toTest);
    }

    @Override
    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("key", key);
        builder.field("key", key.toString());
        builder.field("properties", properties);
        builder.field("is_group_setting", isGroupSetting());
        builder.field("default", defaultValue.apply(Settings.EMPTY));

@@ -486,6 +505,14 @@ public class Setting<T> extends ToXContentToBytes {
        return value;
    }

    public static TimeValue parseTimeValue(String s, TimeValue minValue, String key) {
        TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
        if (timeValue.millis() < minValue.millis()) {
            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
        }
        return timeValue;
    }

    public static Setting<Integer> intSetting(String key, int defaultValue, SettingsProperty... properties) {
        return intSetting(key, defaultValue, Integer.MIN_VALUE, properties);
    }

@@ -535,20 +562,15 @@ public class Setting<T> extends ToXContentToBytes {
        Function<String, List<T>> parser = (s) ->
            parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());

        return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser,
            properties) {
        return new Setting<List<T>>(new ListKey(key),
            (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) {
            private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
            @Override
            public String getRaw(Settings settings) {
                String[] array = settings.getAsArray(key, null);
                String[] array = settings.getAsArray(getKey(), null);
                return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
            }

            @Override
            public boolean match(String toTest) {
                return pattern.matcher(toTest).matches();
            }

            @Override
            boolean hasComplexMatcher() {
                return true;

@@ -591,11 +613,12 @@ public class Setting<T> extends ToXContentToBytes {
    }

    public static Setting<Settings> groupSetting(String key, SettingsProperty... properties) {
        // TODO CHECK IF WE REMOVE
        if (key.endsWith(".") == false) {
            throw new IllegalArgumentException("key must end with a '.'");
        }
        return new Setting<Settings>(key, "", (s) -> null, properties) {
        // TODO CHECK IF WE REMOVE -END
        return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, properties) {
            @Override
            public boolean isGroupSetting() {
                return true;

@@ -603,12 +626,7 @@ public class Setting<T> extends ToXContentToBytes {

            @Override
            public Settings get(Settings settings) {
                return settings.getByPrefix(key);
            }

            @Override
            public boolean match(String toTest) {
                return Regex.simpleMatch(key + "*", toTest);
                return settings.getByPrefix(getKey());
            }

            @Override

@@ -701,11 +719,28 @@ public class Setting<T> extends ToXContentToBytes {

    /**
     * This setting type allows to validate settings that have the same type and a common prefix. For instance feature.${type}=[true|false]
     * can easily be added with this setting. Yet, dynamic key settings don't support updaters out of the box unless {@link #getConcreteSetting(String)}
     * is used to pull the updater.
     * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless
     * {@link #getConcreteSetting(String)} is used to pull the updater.
     */
    public static <T> Setting<T> dynamicKeySetting(String key, String defaultValue, Function<String, T> parser,
                                                   SettingsProperty... properties) {
    public static <T> Setting<T> prefixKeySetting(String prefix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope);
    }

    /**
     * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance
     * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters
     * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater.
     */
    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope);
    }

    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope);
    }

    public static <T> Setting<T> affixKeySetting(AffixKey key, Function<Settings, String> defaultValue, Function<String, T> parser,
                                                 SettingsProperty... properties) {
        return new Setting<T>(key, defaultValue, parser, properties) {

            @Override

@@ -713,14 +748,9 @@ public class Setting<T> extends ToXContentToBytes {
                return true;
            }

            @Override
            public boolean match(String toTest) {
                return toTest.startsWith(getKey());
            }

            @Override
            AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
                throw new UnsupportedOperationException("dynamic settings can't be updated use #getConcreteSetting for updating");
                throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating.");
            }

            @Override

@@ -728,9 +758,145 @@ public class Setting<T> extends ToXContentToBytes {
                if (match(key)) {
                    return new Setting<>(key, defaultValue, parser, properties);
                } else {
                    throw new IllegalArgumentException("key must match setting but didn't ["+key +"]");
                    throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't.");
                }
            }
        };
    }

    public interface Key {
        boolean match(String key);
    }

    public static class SimpleKey implements Key {
        protected final String key;

        public SimpleKey(String key) {
            this.key = key;
        }

        @Override
        public boolean match(String key) {
            return this.key.equals(key);
        }

        @Override
        public String toString() {
            return key;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            SimpleKey simpleKey = (SimpleKey) o;
            return Objects.equals(key, simpleKey.key);
        }

        @Override
        public int hashCode() {
            return Objects.hash(key);
        }
    }

    public static final class GroupKey extends SimpleKey {
        public GroupKey(String key) {
            super(key);
            if (key.endsWith(".") == false) {
                throw new IllegalArgumentException("key must end with a '.'");
            }
        }

        @Override
        public boolean match(String toTest) {
            return Regex.simpleMatch(key + "*", toTest);
        }
    }

    public static final class ListKey extends SimpleKey {
        private final Pattern pattern;

        public ListKey(String key) {
            super(key);
            this.pattern = Pattern.compile(Pattern.quote(key) + "(\\.\\d+)?");
        }

        @Override
        public boolean match(String toTest) {
            return pattern.matcher(toTest).matches();
        }
    }

    public static final class AffixKey implements Key {
        public static AffixKey withPrefix(String prefix) {
            return new AffixKey(prefix, null);
        }

        public static AffixKey withAdfix(String prefix, String suffix) {
            return new AffixKey(prefix, suffix);
        }

        private final String prefix;
        private final String suffix;

        public AffixKey(String prefix, String suffix) {
            assert prefix != null || suffix != null: "Either prefix or suffix must be non-null";
            this.prefix = prefix;
            this.suffix = suffix;
        }

        @Override
        public boolean match(String key) {
            boolean match = true;
            if (prefix != null) {
                match = key.startsWith(prefix);
            }
            if (suffix != null) {
                match = match && key.endsWith(suffix);
            }
            return match;
        }

        public SimpleKey toConcreteKey(String missingPart) {
            StringBuilder key = new StringBuilder();
            if (prefix != null) {
                key.append(prefix);
            }
            key.append(missingPart);
            if (suffix != null) {
                key.append(".");
                key.append(suffix);
            }
            return new SimpleKey(key.toString());
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            if (prefix != null) {
                sb.append(prefix);
            }
            if (suffix != null) {
                sb.append("*");
                sb.append(suffix);
                sb.append(".");
            }
            return sb.toString();
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            AffixKey that = (AffixKey) o;
            return Objects.equals(prefix, that.prefix) &&
                   Objects.equals(suffix, that.suffix);
        }

        @Override
        public int hashCode() {
            return Objects.hash(prefix, suffix);
        }
    }
}
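A usage sketch for the new key abstraction (illustrative, not part of the commit); the storage.*.enable key is a hypothetical example taken from the Javadoc above:

    import org.elasticsearch.common.settings.Setting;

    final class AffixKeyDemo {
        public static void main(String[] args) {
            Setting.AffixKey key = Setting.AffixKey.withAdfix("storage.", "enable");

            System.out.println(key.match("storage.s3.enable"));     // true: prefix and suffix both match
            System.out.println(key.match("storage.s3.read_only"));  // false: suffix does not match
            System.out.println(key.toConcreteKey("s3"));             // prints "storage.s3.enable"
        }
    }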
@@ -22,11 +22,13 @@ package org.elasticsearch.index.engine;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.SnapshotDeletionPolicy;

@@ -36,12 +38,15 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

@@ -64,8 +69,11 @@ import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;

@@ -406,7 +414,7 @@ public abstract class Engine implements Closeable {
    /**
     * Global stats on segments.
     */
    public final SegmentsStats segmentsStats() {
    public final SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
        ensureOpen();
        try (final Searcher searcher = acquireSearcher("segments_stats")) {
            SegmentsStats stats = new SegmentsStats();

@@ -418,12 +426,81 @@ public abstract class Engine implements Closeable {
                stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader()));
                stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader()));
                stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader()));

                if (includeSegmentFileSizes) {
                    // TODO: consider moving this to StoreStats
                    stats.addFileSizes(getSegmentFileSizes(segmentReader));
                }
            }
            writerSegmentStats(stats);
            return stats;
        }
    }

    private ImmutableOpenMap<String, Long> getSegmentFileSizes(SegmentReader segmentReader) {
        Directory directory = null;
        SegmentCommitInfo segmentCommitInfo = segmentReader.getSegmentInfo();
        boolean useCompoundFile = segmentCommitInfo.info.getUseCompoundFile();
        if (useCompoundFile) {
            try {
                directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ);
            } catch (IOException e) {
                logger.warn("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", e,
                        segmentReader.directory(), segmentCommitInfo);

                return ImmutableOpenMap.of();
            }
        } else {
            directory = segmentReader.directory();
        }

        assert directory != null;

        String[] files;
        if (useCompoundFile) {
            try {
                files = directory.listAll();
            } catch (IOException e) {
                logger.warn("Couldn't list Compound Reader Directory [{}]", e, directory);
                return ImmutableOpenMap.of();
            }
        } else {
            try {
                files = segmentReader.getSegmentInfo().files().toArray(new String[]{});
            } catch (IOException e) {
                logger.warn("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", e, segmentReader, segmentReader.getSegmentInfo());
                return ImmutableOpenMap.of();
            }
        }

        ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder();
        for (String file : files) {
            String extension = IndexFileNames.getExtension(file);
            long length = 0L;
            try {
                length = directory.fileLength(file);
            } catch (NoSuchFileException | FileNotFoundException e) {
                logger.warn("Tried to query fileLength but file is gone [{}] [{}]", e, directory, file);
            } catch (IOException e) {
                logger.warn("Error when trying to query fileLength [{}] [{}]", e, directory, file);
            }
            if (length == 0L) {
                continue;
            }
            map.put(extension, length);
        }

        if (useCompoundFile && directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                logger.warn("Error when closing compound reader on Directory [{}]", e, directory);
            }
        }

        return map.build();
    }

    protected void writerSegmentStats(SegmentsStats stats) {
        // by default we don't have a writer here... subclasses can override this
        stats.addVersionMapMemoryInBytes(0);
@@ -928,12 +928,6 @@ public class InternalEngine extends Engine {
        iwc.setSimilarity(engineConfig.getSimilarity());
        iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
        iwc.setCodec(engineConfig.getCodec());
        /* We set this timeout to a highish value to work around
         * the default poll interval in the Lucene lock that is
         * 1000ms by default. We might need to poll multiple times
         * here but with 1s poll this is only executed twice at most
         * in combination with the default writelock timeout*/
        iwc.setWriteLockTimeout(5000);
        iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
        // Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
        // of the merge operation and won't slow down _refresh
@@ -19,6 +19,10 @@
package org.elasticsearch.index.engine;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.Version;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -28,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.Iterator;

public class SegmentsStats implements Streamable, ToXContent {

@@ -42,6 +47,33 @@ public class SegmentsStats implements Streamable, ToXContent {
private long indexWriterMaxMemoryInBytes;
private long versionMapMemoryInBytes;
private long bitsetMemoryInBytes;
private ImmutableOpenMap<String, Long> fileSizes = ImmutableOpenMap.of();

/*
* A map to provide a best-effort approach describing Lucene index files.
*
* Ideally this should be in sync to what the current version of Lucene is using, but it's harmless to leave extensions out,
* they'll just miss a proper description in the stats
*/
private static ImmutableOpenMap<String, String> fileDescriptions = ImmutableOpenMap.<String, String>builder()
.fPut("si", "Segment Info")
.fPut("fnm", "Fields")
.fPut("fdx", "Field Index")
.fPut("fdt", "Field Data")
.fPut("tim", "Term Dictionary")
.fPut("tip", "Term Index")
.fPut("doc", "Frequencies")
.fPut("pos", "Positions")
.fPut("pay", "Payloads")
.fPut("nvd", "Norms")
.fPut("nvm", "Norms")
.fPut("dvd", "DocValues")
.fPut("dvm", "DocValues")
.fPut("tvx", "Term Vector Index")
.fPut("tvd", "Term Vector Documents")
.fPut("tvf", "Term Vector Fields")
.fPut("liv", "Live Documents")
.build();

public SegmentsStats() {}

@@ -49,7 +81,7 @@ public class SegmentsStats implements Streamable, ToXContent {
this.count += count;
this.memoryInBytes += memoryInBytes;
}

public void addTermsMemoryInBytes(long termsMemoryInBytes) {
this.termsMemoryInBytes += termsMemoryInBytes;
}

@@ -86,6 +118,22 @@ public class SegmentsStats implements Streamable, ToXContent {
this.bitsetMemoryInBytes += bitsetMemoryInBytes;
}

public void addFileSizes(ImmutableOpenMap<String, Long> fileSizes) {
ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder(this.fileSizes);

for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
ObjectObjectCursor<String, Long> entry = it.next();
if (map.containsKey(entry.key)) {
Long oldValue = map.get(entry.key);
map.put(entry.key, oldValue + entry.value);
} else {
map.put(entry.key, entry.value);
}
}

this.fileSizes = map.build();
}

public void add(SegmentsStats mergeStats) {
if (mergeStats == null) {
return;

@@ -100,6 +148,7 @@ public class SegmentsStats implements Streamable, ToXContent {
addIndexWriterMaxMemoryInBytes(mergeStats.indexWriterMaxMemoryInBytes);
addVersionMapMemoryInBytes(mergeStats.versionMapMemoryInBytes);
addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes);
addFileSizes(mergeStats.fileSizes);
}

/**

@@ -219,6 +268,10 @@ public class SegmentsStats implements Streamable, ToXContent {
return new ByteSizeValue(bitsetMemoryInBytes);
}

public ImmutableOpenMap<String, Long> getFileSizes() {
return fileSizes;
}

public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException {
SegmentsStats stats = new SegmentsStats();
stats.readFrom(in);

@@ -239,6 +292,15 @@ public class SegmentsStats implements Streamable, ToXContent {
builder.byteSizeField(Fields.INDEX_WRITER_MAX_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MAX_MEMORY, indexWriterMaxMemoryInBytes);
builder.byteSizeField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, versionMapMemoryInBytes);
builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, bitsetMemoryInBytes);
builder.startObject(Fields.FILE_SIZES);
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
ObjectObjectCursor<String, Long> entry = it.next();
builder.startObject(entry.key);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, entry.value);
builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others"));
builder.endObject();
}
builder.endObject();
builder.endObject();
return builder;
}

@@ -266,6 +328,10 @@ public class SegmentsStats implements Streamable, ToXContent {
static final XContentBuilderString VERSION_MAP_MEMORY_IN_BYTES = new XContentBuilderString("version_map_memory_in_bytes");
static final XContentBuilderString FIXED_BIT_SET = new XContentBuilderString("fixed_bit_set");
static final XContentBuilderString FIXED_BIT_SET_MEMORY_IN_BYTES = new XContentBuilderString("fixed_bit_set_memory_in_bytes");
static final XContentBuilderString FILE_SIZES = new XContentBuilderString("file_sizes");
static final XContentBuilderString SIZE = new XContentBuilderString("size");
static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
}

@Override

@@ -281,6 +347,19 @@ public class SegmentsStats implements Streamable, ToXContent {
versionMapMemoryInBytes = in.readLong();
indexWriterMaxMemoryInBytes = in.readLong();
bitsetMemoryInBytes = in.readLong();

if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
int size = in.readVInt();
ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
Long value = in.readLong();
map.put(key, value);
}
fileSizes = map.build();
} else {
fileSizes = ImmutableOpenMap.of();
}
}

@Override

@@ -296,5 +375,14 @@ public class SegmentsStats implements Streamable, ToXContent {
out.writeLong(versionMapMemoryInBytes);
out.writeLong(indexWriterMaxMemoryInBytes);
out.writeLong(bitsetMemoryInBytes);

if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
out.writeVInt(fileSizes.size());
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
ObjectObjectCursor<String, Long> entry = it.next();
out.writeString(entry.key);
out.writeLong(entry.value);
}
}
}
}
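One way to read addFileSizes() above: when stats from several shards or segments are combined, byte counts for the same extension are summed rather than overwritten. A small plain-Java sketch of that merge behaviour (illustrative only, the class name is invented):

import java.util.HashMap;
import java.util.Map;

public final class FileSizesMergeSketch {

    // Same semantics as SegmentsStats.addFileSizes(): sum sizes per extension key.
    public static Map<String, Long> merge(Map<String, Long> current, Map<String, Long> incoming) {
        Map<String, Long> merged = new HashMap<>(current);
        for (Map.Entry<String, Long> entry : incoming.entrySet()) {
            merged.merge(entry.getKey(), entry.getValue(), Long::sum);
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, Long> shard0 = new HashMap<>();
        shard0.put("tim", 1024L);
        shard0.put("dvd", 2048L);
        Map<String, Long> shard1 = new HashMap<>();
        shard1.put("tim", 512L);
        // prints the summed totals, e.g. {dvd=2048, tim=1536}
        System.out.println(merge(shard0, shard1));
    }
}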
@@ -510,17 +510,17 @@ class DocumentParser implements Closeable {
private static Mapper.Builder<?,?> createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) {
Mapper.Builder builder = null;
if (fieldType instanceof StringFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
builder = context.root().findTemplateBuilder(context, currentFieldName, "string", "string");
if (builder == null) {
builder = new StringFieldMapper.Builder(currentFieldName);
}
} else if (fieldType instanceof TextFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
builder = context.root().findTemplateBuilder(context, currentFieldName, "text", "string");
if (builder == null) {
builder = new TextFieldMapper.Builder(currentFieldName);
}
} else if (fieldType instanceof KeywordFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
builder = context.root().findTemplateBuilder(context, currentFieldName, "keyword", "string");
if (builder == null) {
builder = new KeywordFieldMapper.Builder(currentFieldName);
}

@@ -568,7 +568,7 @@ class DocumentParser implements Closeable {
// we need to do it here so we can handle things like attachment templates, where calling
// text (to see if its a date) causes the binary value to be cleared
{
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "text", null);
if (builder != null) {
return builder;
}

@@ -617,7 +617,7 @@ class DocumentParser implements Closeable {
}
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
builder = new StringFieldMapper.Builder(currentFieldName);
builder = new TextFieldMapper.Builder(currentFieldName);
}
return builder;
} else if (token == XContentParser.Token.VALUE_NUMBER) {
@@ -132,6 +132,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
+ "or [keyword] field instead for field [" + fieldName + "]");
}
StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName);
// hack for the fact that string can't just accept true/false for
// the index property and still accepts no/not_analyzed/analyzed

@@ -236,6 +240,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
int positionIncrementGap, int ignoreAbove,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
+ "or [keyword] field instead for field [" + fieldType.name() + "]");
}
if (fieldType.tokenized() && fieldType.indexOptions() != NONE && fieldType().hasDocValues()) {
throw new MapperParsingException("Field [" + fieldType.name() + "] cannot be analyzed and have doc values");
}
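The two guards above reject [string] only for indices created on or after 5.0, so existing 2.x indices keep working. A hedged, self-contained sketch of that check (the helper class and sample settings are made up for illustration; Settings.builder() is assumed here, older branches spell it settingsBuilder()):

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;

public final class StringTypeGuardSketch {

    // Mirrors the constructor/TypeParser guard: only pre-5.0 indices may still use [string].
    public static void checkStringTypeAllowed(Settings indexSettings, String fieldName) {
        if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) {
            throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
                + "or [keyword] field instead for field [" + fieldName + "]");
        }
    }

    public static void main(String[] args) {
        Settings oldIndex = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0.id)
            .build();
        checkStringTypeAllowed(oldIndex, "title"); // passes for an index created on 2.3
    }
}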
@@ -20,8 +20,6 @@
package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;

@@ -40,9 +38,8 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;

import java.io.IOException;

@@ -148,7 +145,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);

public Y build(Mapper.BuilderContext context) {
GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;

@@ -168,11 +165,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
lonMapper = (DoubleFieldMapper) lonMapperBuilder.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
}
StringFieldMapper geoHashMapper = null;
KeywordFieldMapper geoHashMapper = null;
if (enableGeoHash || enableGeoHashPrefix) {
// TODO: possible also implicitly enable geohash if geohash precision is set
geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
.omitNorms(true).indexOptions(IndexOptions.DOCS).build(context);
geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH).index(true).includeInAll(false).store(fieldType.stored()).build(context);
geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
}
context.path().remove();

@@ -349,12 +345,12 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

protected DoubleFieldMapper lonMapper;

protected StringFieldMapper geoHashMapper;
protected KeywordFieldMapper geoHashMapper;

protected Explicit<Boolean> ignoreMalformed;

protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, KeywordFieldMapper geoHashMapper,
MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.latMapper = latMapper;

@@ -507,7 +503,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
BaseGeoPointFieldMapper updated = (BaseGeoPointFieldMapper) super.updateFieldType(fullNameToFieldType);
StringFieldMapper geoUpdated = geoHashMapper == null ? null : (StringFieldMapper) geoHashMapper.updateFieldType(fullNameToFieldType);
KeywordFieldMapper geoUpdated = geoHashMapper == null ? null : (KeywordFieldMapper) geoHashMapper.updateFieldType(fullNameToFieldType);
DoubleFieldMapper latUpdated = latMapper == null ? null : (DoubleFieldMapper) latMapper.updateFieldType(fullNameToFieldType);
DoubleFieldMapper lonUpdated = lonMapper == null ? null : (DoubleFieldMapper) lonMapper.updateFieldType(fullNameToFieldType);
if (updated == this
@@ -33,7 +33,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;

import java.io.IOException;
import java.util.Map;

@@ -79,7 +79,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
@Override
public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
DoubleFieldMapper lonMapper, KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
if (context.indexCreatedVersion().before(Version.V_2_3_0)) {

@@ -110,7 +110,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {

public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
}
@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoDistance;

@@ -40,8 +39,8 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;

import java.io.IOException;

@@ -110,7 +109,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
@Override
public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType,
MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
DoubleFieldMapper lonMapper, KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
setupFieldType(context);

@@ -268,7 +267,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement

public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
@@ -235,10 +235,30 @@ public class RootObjectMapper extends ObjectMapper {
return dynamicDateTimeFormatters;
}

public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType) {
return findTemplateBuilder(context, name, dynamicType, dynamicType);
public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String matchType) {
final String dynamicType;
switch (matchType) {
case "string":
// string is a corner case since a json string can either map to a
// text or keyword field in elasticsearch. For now we use text when
// unspecified. For other types, the mapping type matches the json
// type so we are fine
dynamicType = "text";
break;
default:
dynamicType = matchType;
break;
}
return findTemplateBuilder(context, name, dynamicType, matchType);
}

/**
* Find a template. Returns {@code null} if no template could be found.
* @param name the field name
* @param dynamicType the field type to give the field if the template does not define one
* @param matchType the type of the field in the json document or null if unknown
* @return a mapper builder, or null if there is no template for such a field
*/
public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType, String matchType) {
DynamicTemplate dynamicTemplate = findTemplate(context.path(), name, matchType);
if (dynamicTemplate == null) {
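The switch above only special-cases JSON strings: without a template they dynamically map to text, every other match type is passed through unchanged. A tiny standalone sketch of that decision (names invented):

public final class DynamicTypeSketch {

    // matchType is the JSON-side type; the return value is the mapping type used when no template overrides it.
    public static String resolveDynamicType(String matchType) {
        switch (matchType) {
            case "string":
                return "text"; // json strings default to text fields
            default:
                return matchType;
        }
    }

    public static void main(String[] args) {
        System.out.println(resolveDynamicType("string")); // text
        System.out.println(resolveDynamicType("long"));   // long
    }
}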
@@ -132,7 +132,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
}
DocumentMapper parentDocMapper = context.getMapperService().documentMapper(type);
if (parentDocMapper == null) {
throw new QueryShardException(context, "[has_parent] query configured 'parent_type' [" + type
throw new QueryShardException(context, "[" + NAME + "] query configured 'parent_type' [" + type
+ "] is not a valid type");
}

@@ -152,49 +152,36 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
}
}

Set<String> parentTypes = new HashSet<>(5);
parentTypes.add(parentDocMapper.type());
Set<String> childTypes = new HashSet<>();
ParentChildIndexFieldData parentChildIndexFieldData = null;
for (DocumentMapper documentMapper : context.getMapperService().docMappers(false)) {
ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
if (parentFieldMapper.active()) {
DocumentMapper parentTypeDocumentMapper = context.getMapperService().documentMapper(parentFieldMapper.type());
if (parentFieldMapper.active() && type.equals(parentFieldMapper.type())) {
childTypes.add(documentMapper.type());
parentChildIndexFieldData = context.getForField(parentFieldMapper.fieldType());
if (parentTypeDocumentMapper == null) {
// Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent.
parentTypes.add(parentFieldMapper.type());
}
}
}
if (parentChildIndexFieldData == null) {
throw new QueryShardException(context, "[has_parent] no _parent field configured");
}

Query parentTypeQuery = null;
if (parentTypes.size() == 1) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(parentTypes.iterator().next());
if (documentMapper != null) {
parentTypeQuery = documentMapper.typeFilter();
}
if (childTypes.isEmpty()) {
throw new QueryShardException(context, "[" + NAME + "] no child types found for type [" + type + "]");
}

Query childrenQuery;
if (childTypes.size() == 1) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(childTypes.iterator().next());
childrenQuery = documentMapper.typeFilter();
} else {
BooleanQuery.Builder parentsFilter = new BooleanQuery.Builder();
for (String parentTypeStr : parentTypes) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(parentTypeStr);
if (documentMapper != null) {
parentsFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD);
}
BooleanQuery.Builder childrenFilter = new BooleanQuery.Builder();
for (String childrenTypeStr : childTypes) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(childrenTypeStr);
childrenFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD);
}
parentTypeQuery = parentsFilter.build();
}

if (parentTypeQuery == null) {
return null;
childrenQuery = childrenFilter.build();
}

// wrap the query with type query
innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter());
Query childrenFilter = Queries.not(parentTypeQuery);
return new HasChildQueryBuilder.LateParsingQuery(childrenFilter,
return new HasChildQueryBuilder.LateParsingQuery(childrenQuery,
innerQuery,
HasChildQueryBuilder.DEFAULT_MIN_CHILDREN,
HasChildQueryBuilder.DEFAULT_MAX_CHILDREN,
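The rewritten query above collects every type whose _parent points at the requested parent type and ORs their type filters together. A rough Lucene-level illustration of that children filter (a sketch only; the _type term query merely stands in for DocumentMapper.typeFilter()):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

import java.util.Arrays;
import java.util.List;

public final class ChildTypesFilterSketch {

    // One SHOULD clause per child type: a document matches if it belongs to any of them.
    public static Query childTypesFilter(List<String> childTypes) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (String childType : childTypes) {
            builder.add(new TermQuery(new Term("_type", childType)), BooleanClause.Occur.SHOULD);
        }
        return builder.build();
    }

    public static void main(String[] args) {
        System.out.println(childTypesFilter(Arrays.asList("answer", "comment")));
    }
}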
@@ -630,8 +630,8 @@ public class IndexShard extends AbstractIndexShardComponent {
return engine.getMergeStats();
}

public SegmentsStats segmentStats() {
SegmentsStats segmentsStats = getEngine().segmentsStats();
public SegmentsStats segmentStats(boolean includeSegmentFileSizes) {
SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes);
segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes());
return segmentsStats;
}
@@ -926,13 +926,6 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
}
Store.verify(indexOutput);
indexOutput.close();
// write the checksum
if (fileInfo.metadata().hasLegacyChecksum()) {
Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
legacyChecksums.add(fileInfo.metadata());
legacyChecksums.write(store);

}
store.directory().sync(Collections.singleton(fileInfo.physicalName()));
success = true;
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
@@ -180,7 +180,6 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
*
* @return file checksum
*/
@Nullable
public String checksum() {
return metadata.checksum();
}
@@ -29,8 +29,10 @@ import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.RateLimitedFSDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SleepingLockWrapper;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.settings.Setting;

@@ -87,10 +89,12 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
final Path location = path.resolveIndex();
Files.createDirectories(location);
Directory wrapped = newFSDirectory(location, indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING));
if (IndexMetaData.isOnSharedFilesystem(indexSettings.getSettings())) {
wrapped = new SleepingLockWrapper(wrapped, 5000);
}
return new RateLimitedFSDirectory(wrapped, this, this) ;
}

@Override
public void onPause(long nanos) {
rateLimitingTimeInNanos.inc(nanos);
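With the directory wrapped in SleepingLockWrapper on shared filesystems, acquiring the write lock retries for up to the given timeout instead of failing immediately, which appears to replace the IndexWriterConfig write-lock timeout handling removed earlier in this diff. A hedged sketch of the wrapping on its own (same (Directory, long milliseconds) constructor the diff uses; the helper class is invented):

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.SleepingLockWrapper;

import java.io.IOException;
import java.nio.file.Paths;

public final class SleepingLockSketch {

    // Lock acquisition through the wrapper sleeps and retries for up to 5000 ms.
    public static Directory openWithPatientLocking(String path) throws IOException {
        Directory dir = FSDirectory.open(Paths.get(path));
        return new SleepingLockWrapper(dir, 5000);
    }
}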
@@ -1,123 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.store;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.BufferedChecksum;
import org.apache.lucene.store.IndexOutput;

import java.io.IOException;
import java.util.zip.Adler32;
import java.util.zip.Checksum;

/**
* Implements verification checks to the best extent possible
* against legacy segments.
* <p>
* For files since ES 1.3, we have a lucene checksum, and
* we verify both CRC32 + length from that.
* For older segment files, we have an elasticsearch Adler32 checksum
* and a length, except for commit points.
* For older commit points, we only have the length in metadata,
* but lucene always wrote a CRC32 checksum we can verify in the future, too.
* For (Jurassic?) files, we dont have an Adler32 checksum at all,
* since its optional in the protocol. But we always know the length.
* @deprecated only to support old segments
*/
@Deprecated
class LegacyVerification {

// TODO: add a verifier for old lucene segments_N that also checks CRC.
// but for now, at least truncation is detected here (as length will be checked)

/**
* verifies Adler32 + length for index files before lucene 4.8
*/
static class Adler32VerifyingIndexOutput extends VerifyingIndexOutput {
final String adler32;
final long length;
final Checksum checksum = new BufferedChecksum(new Adler32());
long written;

public Adler32VerifyingIndexOutput(IndexOutput out, String adler32, long length) {
super(out);
this.adler32 = adler32;
this.length = length;
}

@Override
public void verify() throws IOException {
if (written != length) {
throw new CorruptIndexException("expected length=" + length + " != actual length: " + written + " : file truncated?", out.toString());
}
final String actualChecksum = Store.digestToString(checksum.getValue());
if (!adler32.equals(actualChecksum)) {
throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + adler32 +
" actual=" + actualChecksum, out.toString());
}
}

@Override
public void writeByte(byte b) throws IOException {
out.writeByte(b);
checksum.update(b);
written++;
}

@Override
public void writeBytes(byte[] bytes, int offset, int length) throws IOException {
out.writeBytes(bytes, offset, length);
checksum.update(bytes, offset, length);
written += length;
}
}

/**
* verifies length for index files before lucene 4.8
*/
static class LengthVerifyingIndexOutput extends VerifyingIndexOutput {
final long length;
long written;

public LengthVerifyingIndexOutput(IndexOutput out, long length) {
super(out);
this.length = length;
}

@Override
public void verify() throws IOException {
if (written != length) {
throw new CorruptIndexException("expected length=" + length + " != actual length: " + written + " : file truncated?", out.toString());
}
}

@Override
public void writeByte(byte b) throws IOException {
out.writeByte(b);
written++;
}

@Override
public void writeBytes(byte[] bytes, int offset, int length) throws IOException {
out.writeBytes(bytes, offset, length);
written += length;
}
}
}
@@ -50,7 +50,6 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

@@ -71,7 +70,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.RefCounted;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;

@@ -84,7 +82,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.sql.Time;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;

@@ -122,8 +119,6 @@ import static java.util.Collections.unmodifiableMap;
* </pre>
*/
public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;

static final String CODEC = "store";
static final int VERSION_WRITE_THROWABLE= 2; // we write throwable since 2.0
static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0

@@ -152,7 +147,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY);
}

@Inject
public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException {
super(shardId, indexSettings);
final Settings settings = indexSettings.getSettings();

@@ -462,19 +456,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
IndexOutput output = directory().createOutput(fileName, context);
boolean success = false;
try {
if (metadata.hasLegacyChecksum()) {
logger.debug("create legacy adler32 output for {}", fileName);
output = new LegacyVerification.Adler32VerifyingIndexOutput(output, metadata.checksum(), metadata.length());
} else if (metadata.checksum() == null) {
// TODO: when the file is a segments_N, we can still CRC-32 + length for more safety
// its had that checksum forever.
logger.debug("create legacy length-only output for {}", fileName);
output = new LegacyVerification.LengthVerifyingIndexOutput(output, metadata.length());
} else {
assert metadata.writtenBy() != null;
assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
output = new LuceneVerifyingIndexOutput(metadata, output);
}
assert metadata.writtenBy() != null;
assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
output = new LuceneVerifyingIndexOutput(metadata, output);
success = true;
} finally {
if (success == false) {

@@ -491,12 +475,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}

public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException {
if (metadata.hasLegacyChecksum() || metadata.checksum() == null) {
logger.debug("open legacy input for {}", filename);
return directory().openInput(filename, context);
}
assert metadata.writtenBy() != null;
assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
return new VerifyingIndexInput(directory().openInput(filename, context));
}

@@ -524,32 +504,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
if (input.length() != md.length()) { // first check the length no matter how old this file is
throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input);
}
if (md.writtenBy() != null && md.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
// throw exception if the file is corrupt
String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
// throw exception if metadata is inconsistent
if (!checksum.equals(md.checksum())) {
throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum +
", metadata checksum=" + md.checksum(), input);
}
} else if (md.hasLegacyChecksum()) {
// legacy checksum verification - no footer that we need to omit in the checksum!
final Checksum checksum = new Adler32();
final byte[] buffer = new byte[md.length() > 4096 ? 4096 : (int) md.length()];
final long len = input.length();
long read = 0;
while (len > read) {
final long bytesLeft = len - read;
final int bytesToRead = bytesLeft < buffer.length ? (int) bytesLeft : buffer.length;
input.readBytes(buffer, 0, bytesToRead, false);
checksum.update(buffer, 0, bytesToRead);
read += bytesToRead;
}
String adler32 = Store.digestToString(checksum.getValue());
if (!adler32.equals(md.checksum())) {
throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + md.checksum() +
" actual=" + adler32, input);
}
// throw exception if the file is corrupt
String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
// throw exception if metadata is inconsistent
if (!checksum.equals(md.checksum())) {
throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum +
", metadata checksum=" + md.checksum(), input);
}
}
}

@@ -805,7 +765,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
final int size = in.readVInt();
Map<String, StoreFileMetaData> metadata = new HashMap<>();
for (int i = 0; i < size; i++) {
StoreFileMetaData meta = StoreFileMetaData.readStoreFileMetaData(in);
StoreFileMetaData meta = new StoreFileMetaData(in);
metadata.put(meta.name(), meta);
}
Map<String, String> commitUserData = new HashMap<>();

@@ -842,14 +802,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
long numDocs;
Map<String, StoreFileMetaData> builder = new HashMap<>();
Map<String, String> checksumMap = readLegacyChecksums(directory).v1();
Map<String, String> commitUserDataBuilder = new HashMap<>();
try {
final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
numDocs = Lucene.getNumDocs(segmentCommitInfos);
commitUserDataBuilder.putAll(segmentCommitInfos.getUserData());
@SuppressWarnings("deprecation")
Version maxVersion = Version.LUCENE_4_0; // we don't know which version was used to write so we take the max version.
Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version.
for (SegmentCommitInfo info : segmentCommitInfos) {
final Version version = info.info.getVersion();
if (version == null) {

@@ -860,26 +819,21 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
maxVersion = version;
}
for (String file : info.files()) {
String legacyChecksum = checksumMap.get(file);
if (version.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
if (version.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
} else {
builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), legacyChecksum, version));
throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + version);
}
}
}
if (maxVersion == null) {
maxVersion = StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION;
}
final String segmentsFile = segmentCommitInfos.getSegmentsFileName();
String legacyChecksum = checksumMap.get(segmentsFile);
if (maxVersion.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
if (maxVersion.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
} else {
final BytesRefBuilder fileHash = new BytesRefBuilder();
final long length;
try (final IndexInput in = directory.openInput(segmentsFile, IOContext.READONCE)) {
length = in.length();
hashFile(fileHash, new InputStreamIndexInput(in, length), length);
}
builder.put(segmentsFile, new StoreFileMetaData(segmentsFile, length, legacyChecksum, maxVersion, fileHash.get()));
throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + maxVersion);
}
} catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
// we either know the index is corrupted or it's just not there

@@ -904,61 +858,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs);
}

/**
* Reads legacy checksum files found in the directory.
* <p>
* Files are expected to start with _checksums- prefix
* followed by long file version. Only file with the highest version is read, all other files are ignored.
*
* @param directory the directory to read checksums from
* @return a map of file checksums and the checksum file version
*/
@SuppressWarnings("deprecation") // Legacy checksum needs legacy methods
static Tuple<Map<String, String>, Long> readLegacyChecksums(Directory directory) throws IOException {
synchronized (directory) {
long lastFound = -1;
for (String name : directory.listAll()) {
if (!isChecksum(name)) {
continue;
}
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
if (current > lastFound) {
lastFound = current;
}
}
if (lastFound > -1) {
try (IndexInput indexInput = directory.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE)) {
indexInput.readInt(); // version
return new Tuple<>(indexInput.readStringStringMap(), lastFound);
}
}
return new Tuple<>(new HashMap<>(), -1L);
}
}

/**
* Deletes all checksum files with version lower than newVersion.
*
* @param directory the directory to clean
* @param newVersion the latest checksum file version
*/
static void cleanLegacyChecksums(Directory directory, long newVersion) throws IOException {
synchronized (directory) {
for (String name : directory.listAll()) {
if (isChecksum(name)) {
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
if (current < newVersion) {
try {
directory.deleteFile(name);
} catch (IOException ex) {
logger.debug("can't delete old checksum file [{}]", ex, name);
}
}
}
}
}
}

private static void checksumFromLuceneFile(Directory directory, String file, Map<String, StoreFileMetaData> builder,
ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
final String checksum;

@@ -1227,64 +1126,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}

public final static class LegacyChecksums {
private final Map<String, String> legacyChecksums = new HashMap<>();

public void add(StoreFileMetaData metaData) throws IOException {

if (metaData.hasLegacyChecksum()) {
synchronized (this) {
// we don't add checksums if they were written by LUCENE_48... now we are using the build in mechanism.
legacyChecksums.put(metaData.name(), metaData.checksum());
}
}
}

public synchronized void write(Store store) throws IOException {
synchronized (store.directory) {
Tuple<Map<String, String>, Long> tuple = MetadataSnapshot.readLegacyChecksums(store.directory);
tuple.v1().putAll(legacyChecksums);
if (!tuple.v1().isEmpty()) {
writeChecksums(store.directory, tuple.v1(), tuple.v2());
}
}
}

@SuppressWarnings("deprecation") // Legacy checksum uses legacy methods
synchronized void writeChecksums(Directory directory, Map<String, String> checksums, long lastVersion) throws IOException {
// Make sure if clock goes backwards we still move version forwards:
long nextVersion = Math.max(lastVersion+1, System.currentTimeMillis());
final String checksumName = CHECKSUMS_PREFIX + nextVersion;
try (IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT)) {
output.writeInt(0); // version
output.writeStringStringMap(checksums);
}
directory.sync(Collections.singleton(checksumName));
MetadataSnapshot.cleanLegacyChecksums(directory, nextVersion);
}

public void clear() {
this.legacyChecksums.clear();
}

public void remove(String name) {
legacyChecksums.remove(name);
}
}

public static final String CHECKSUMS_PREFIX = "_checksums-";

public static boolean isChecksum(String name) {
// TODO can we drowp .cks
return name.startsWith(CHECKSUMS_PREFIX) || name.endsWith(".cks"); // bwcomapt - .cks used to be a previous checksum file
}

/**
* Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup.
* This includes write lock and checksum files
*/
public static boolean isAutogenerated(String name) {
return IndexWriter.WRITE_LOCK_NAME.equals(name) || isChecksum(name);
return IndexWriter.WRITE_LOCK_NAME.equals(name);
}

/**
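After this change the legacy Adler32 and length-only paths are gone and every file is expected to carry a Lucene 4.8+ footer, so verification reduces to checksumming the whole file with CodecUtil and comparing against the stored value. A hedged sketch of that single remaining path (the radix-36 string conversion mirrors what digestToString is assumed to do; the class name is invented):

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

import java.io.IOException;

public final class LuceneChecksumSketch {

    // Reads the file once, validates its footer and returns the CRC32 as a string.
    public static String checksumEntireFile(Directory directory, String fileName) throws IOException {
        try (IndexInput input = directory.openInput(fileName, IOContext.READONCE)) {
            return Long.toString(CodecUtil.checksumEntireFile(input), Character.MAX_RADIX);
        }
    }
}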
@@ -25,35 +25,42 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lucene.Lucene;

import java.io.IOException;
import java.util.Objects;

/**
*
*/
public class StoreFileMetaData implements Streamable {
public class StoreFileMetaData implements Writeable {

private String name;
public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;

private final String name;

// the actual file size on "disk", if compressed, the compressed size
private long length;
private final long length;

private String checksum;
private final String checksum;

private Version writtenBy;
private final Version writtenBy;

private BytesRef hash;
private final BytesRef hash;

private StoreFileMetaData() {
}

public StoreFileMetaData(String name, long length) {
this(name, length, null);
public StoreFileMetaData(StreamInput in) throws IOException {
name = in.readString();
length = in.readVLong();
checksum = in.readString();
String versionString = in.readString();
assert versionString != null;
writtenBy = Lucene.parseVersionLenient(versionString, FIRST_LUCENE_CHECKSUM_VERSION);
hash = in.readBytesRef();
}

public StoreFileMetaData(String name, long length, String checksum) {
this(name, length, checksum, null, null);
this(name, length, checksum, FIRST_LUCENE_CHECKSUM_VERSION);
}

public StoreFileMetaData(String name, long length, String checksum, Version writtenBy) {

@@ -61,6 +68,10 @@ public class StoreFileMetaData implements Streamable {
}

public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that "
+ FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
Objects.requireNonNull(writtenBy, "writtenBy must not be null");
Objects.requireNonNull(checksum, "checksum must not be null");
this.name = name;
this.length = length;
this.checksum = checksum;

@@ -85,10 +96,8 @@ public class StoreFileMetaData implements Streamable {

/**
* Returns a string representation of the files checksum. Since Lucene 4.8 this is a CRC32 checksum written
* by lucene. Previously we use Adler32 on top of Lucene as the checksum algorithm, if {@link #hasLegacyChecksum()} returns
* <code>true</code> this is a Adler32 checksum.
* by lucene.
*/
@Nullable
public String checksum() {
return this.checksum;
}

@@ -104,33 +113,22 @@ public class StoreFileMetaData implements Streamable {
return length == other.length && checksum.equals(other.checksum) && hash.equals(other.hash);
}

public static StoreFileMetaData readStoreFileMetaData(StreamInput in) throws IOException {
StoreFileMetaData md = new StoreFileMetaData();
md.readFrom(in);
return md;
}

@Override
public String toString() {
return "name [" + name + "], length [" + length + "], checksum [" + checksum + "], writtenBy [" + writtenBy + "]" ;
}

@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
length = in.readVLong();
checksum = in.readOptionalString();
String versionString = in.readOptionalString();
writtenBy = Lucene.parseVersionLenient(versionString, null);
hash = in.readBytesRef();
public StoreFileMetaData readFrom(StreamInput in) throws IOException {
return new StoreFileMetaData(in);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeVLong(length);
out.writeOptionalString(checksum);
out.writeOptionalString(writtenBy == null ? null : writtenBy.toString());
out.writeString(checksum);
out.writeString(writtenBy.toString());
out.writeBytesRef(hash);
}

@@ -141,14 +139,6 @@ public class StoreFileMetaData implements Streamable {
return writtenBy;
}

/**
* Returns <code>true</code> iff the checksum is not <code>null</code> and if the file has NOT been written by
* a Lucene version greater or equal to Lucene 4.8
*/
public boolean hasLegacyChecksum() {
return checksum != null && (writtenBy == null || writtenBy.onOrAfter(Version.LUCENE_4_8) == false);
}

/**
* Returns a variable length hash of the file represented by this metadata object. This can be the file
* itself if the file is small enough. If the length of the hash is <tt>0</tt> no hash value is available
@@ -76,8 +76,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
private final AtomicBoolean finished = new AtomicBoolean();

private final ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();
private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();

private final CancellableThreads cancellableThreads = new CancellableThreads();

// last time this status was accessed

@@ -145,10 +143,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
return state().getStage();
}

public Store.LegacyChecksums legacyChecksums() {
return legacyChecksums;
}

/** renames all temporary files to their true name, potentially overriding existing files */
public void renameAllTempFiles() throws IOException {
ensureRefCount();

@@ -281,7 +275,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
logger.trace("cleaning temporary file [{}]", file);
store.deleteQuiet(file);
}
legacyChecksums.clear();
}

@Override

@@ -344,8 +337,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
// to recover from in case of a full cluster shutdown just when this code executes...
renameAllTempFiles();
final Store store = store();
// now write checksums
legacyChecksums().write(store);
try {
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {

@@ -399,8 +390,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
// we are done
indexOutput.close();
}
// write the checksum
legacyChecksums().add(fileMetaData);
final String temporaryFileName = getTempNameForFile(name);
assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
store.directory().sync(Collections.singleton(temporaryFileName));
@@ -172,7 +172,8 @@ public class Node implements Closeable {
tmpSettings = TribeService.processSettings(tmpSettings);

ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(tmpSettings));
logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.shortHash(), Build.CURRENT.date());
final String displayVersion = version + (Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "");
logger.info("version[{}], pid[{}], build[{}/{}]", displayVersion, JvmInfo.jvmInfo().pid(), Build.CURRENT.shortHash(), Build.CURRENT.date());

logger.info("initializing ...");
@@ -109,6 +109,9 @@ public class RestNodesStatsAction extends BaseRestHandler {
if (nodesStatsRequest.indices().isSet(Flag.Indexing) && (request.hasParam("types"))) {
nodesStatsRequest.indices().types(request.paramAsStringArray("types", null));
}
if (nodesStatsRequest.indices().isSet(Flag.Segments) && (request.hasParam("include_segment_file_sizes"))) {
nodesStatsRequest.indices().includeSegmentFileSizes(true);
}

client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener<NodesStatsResponse>(channel));
}
@@ -104,6 +104,10 @@ public class RestIndicesStatsAction extends BaseRestHandler {
indicesStatsRequest.fieldDataFields(request.paramAsStringArray("fielddata_fields", request.paramAsStringArray("fields", Strings.EMPTY_ARRAY)));
}

if (indicesStatsRequest.segments() && request.hasParam("include_segment_file_sizes")) {
indicesStatsRequest.includeSegmentFileSizes(true);
}

client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener<IndicesStatsResponse>(channel) {
@Override
public RestResponse buildResponse(IndicesStatsResponse response, XContentBuilder builder) throws Exception {
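Wired up this way, the per-extension sizes stay opt-in: the REST layer only flips the flag when include_segment_file_sizes=true is passed with segment stats. On the Java API side the equivalent request would look roughly like the sketch below (a usage illustration, not code from this diff):

import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;

public final class SegmentFileSizesRequestSketch {

    // Request only segment stats, and ask for the per-extension file sizes added in this change.
    public static IndicesStatsRequest segmentsWithFileSizes(String... indices) {
        IndicesStatsRequest request = new IndicesStatsRequest();
        request.indices(indices);
        request.clear();
        request.segments(true);
        request.includeSegmentFileSizes(true);
        return request;
    }
}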
@@ -23,8 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;

@@ -43,9 +41,6 @@ import org.elasticsearch.rest.action.support.RestActionListener;
import org.elasticsearch.rest.action.support.RestResponseListener;
import org.elasticsearch.rest.action.support.RestTable;

import java.util.HashMap;
import java.util.Map;

import static org.elasticsearch.rest.RestRequest.Method.GET;

public class RestNodeAttrsAction extends AbstractCatAction {

@@ -73,17 +68,10 @@ public class RestNodeAttrsAction extends AbstractCatAction {
public void processResponse(final ClusterStateResponse clusterStateResponse) {
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
nodesInfoRequest.clear().jvm(false).os(false).process(true);
client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener<NodesInfoResponse>(channel) {
client.admin().cluster().nodesInfo(nodesInfoRequest, new RestResponseListener<NodesInfoResponse>(channel) {
@Override
public void processResponse(final NodesInfoResponse nodesInfoResponse) {
NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
nodesStatsRequest.clear().jvm(false).os(false).fs(false).indices(false).process(false);
client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<NodesStatsResponse>(channel) {
@Override
public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception {
return RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), channel);
}
});
public RestResponse buildResponse(NodesInfoResponse nodesInfoResponse) throws Exception {
return RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse), channel);
}
});
}

@@ -106,7 +94,7 @@ public class RestNodeAttrsAction extends AbstractCatAction {
return table;
}

private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo) {
boolean fullId = req.paramAsBoolean("full_id", false);

DiscoveryNodes nodes = state.getState().nodes();

@@ -115,32 +103,22 @@ public class RestNodeAttrsAction extends AbstractCatAction {
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
for(ObjectObjectCursor<String, String> att : node.attributes()) {
buildRow(fullId, table, node, info, att.key, att.value);
}
if (info.getServiceAttributes() != null) {
for (Map.Entry<String, String> entry : info.getServiceAttributes().entrySet()) {
buildRow(fullId, table, node, info, entry.getKey(), entry.getValue());
table.startRow();
table.addCell(node.name());
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().getId());
table.addCell(node.getHostName());
|
||||
table.addCell(node.getHostAddress());
|
||||
if (node.address() instanceof InetSocketTransportAddress) {
|
||||
table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
|
||||
} else {
|
||||
table.addCell("-");
|
||||
}
|
||||
table.addCell(att.key);
|
||||
table.addCell(att.value);
|
||||
table.endRow();
|
||||
}
|
||||
}
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
private final void buildRow(boolean fullId, Table table, DiscoveryNode node, NodeInfo info, String key, String value) {
|
||||
table.startRow();
|
||||
table.addCell(node.name());
|
||||
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
|
||||
table.addCell(info == null ? null : info.getProcess().getId());
|
||||
table.addCell(node.getHostName());
|
||||
table.addCell(node.getHostAddress());
|
||||
if (node.address() instanceof InetSocketTransportAddress) {
|
||||
table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
|
||||
} else {
|
||||
table.addCell("-");
|
||||
}
|
||||
table.addCell(key);
|
||||
table.addCell(value);
|
||||
table.endRow();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,8 +33,11 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.Table;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;

@ -92,7 +95,7 @@ public class RestNodesAction extends AbstractCatAction {
@Override
public void processResponse(final ClusterStateResponse clusterStateResponse) {
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
nodesInfoRequest.clear().jvm(true).os(true).process(true);
nodesInfoRequest.clear().jvm(true).os(true).process(true).http(true);
client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener<NodesInfoResponse>(channel) {
@Override
public void processResponse(final NodesInfoResponse nodesInfoResponse) {

@ -249,9 +252,14 @@ public class RestNodesAction extends AbstractCatAction {
} else {
table.addCell("-");
}
final Map<String, String> serviceAttributes = info == null ? null : info.getServiceAttributes();
if (serviceAttributes != null) {
table.addCell(serviceAttributes.getOrDefault("http_address", "-"));
final HttpInfo httpInfo = info == null ? null : info.getHttp();
if (httpInfo != null) {
TransportAddress transportAddress = httpInfo.getAddress().publishAddress();
if (transportAddress instanceof InetSocketTransportAddress) {
table.addCell(NetworkAddress.formatAddress(((InetSocketTransportAddress)transportAddress).address()));
} else {
table.addCell(transportAddress.toString());
}
} else {
table.addCell("-");
}
@ -45,7 +45,7 @@ import java.io.IOException;
/**
 * To be implemented by {@link SearchScript} which can provided an {@link Explanation} of the score
 * This is currently not used inside elasticsearch but it is used, see for example here:
 * https://github.com/elasticsearch/elasticsearch/issues/8561
 * https://github.com/elastic/elasticsearch/issues/8561
 */
public interface ExplainableSearchScript extends LeafSearchScript {

@ -58,4 +58,4 @@ public interface ExplainableSearchScript extends LeafSearchScript {
 */
Explanation explain(Explanation subQueryScore) throws IOException;

}
}
@ -40,18 +40,17 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

@ -779,7 +778,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (context.parseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
fetchSourceContext = FetchSourceContext.parse(parser, context);
} else if (context.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
fieldNames.add(parser.text());
field(parser.text());
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
sort(parser.text());
} else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
@ -153,7 +153,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
}

//https://github.com/elasticsearch/elasticsearch/issues/5038
//https://github.com/elastic/elasticsearch/issues/5038
public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception {
//we create a transport client with no nodes to make sure it throws NoNodeAvailableException
Settings settings = Settings.builder()
@ -0,0 +1,256 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.apache.lucene.util.Constants;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.junit.Before;

import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.mockito.Mockito.mock;

public class TransportBulkActionTookTests extends ESTestCase {

private ThreadPool threadPool;

@Before
public void setUp() throws Exception {
super.setUp();
threadPool = mock(ThreadPool.class);
}

private TransportBulkAction createAction(boolean controlled, AtomicLong expected) {
CapturingTransport capturingTransport = new CapturingTransport();
ClusterService clusterService = new TestClusterService(threadPool);
TransportService transportService = new TransportService(capturingTransport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY);
ActionFilters actionFilters = new ActionFilters(new HashSet<>());

TransportCreateIndexAction createIndexAction = new TransportCreateIndexAction(
Settings.EMPTY,
transportService,
clusterService,
threadPool,
null,
actionFilters,
resolver);

if (controlled) {

return new TestTransportBulkAction(
Settings.EMPTY,
threadPool,
transportService,
clusterService,
null,
createIndexAction,
actionFilters,
resolver,
null,
expected::get) {
@Override
public void executeBulk(BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
expected.set(1000000);
super.executeBulk(bulkRequest, listener);
}

@Override
void executeBulk(
BulkRequest bulkRequest,
long startTimeNanos,
ActionListener<BulkResponse> listener,
AtomicArray<BulkItemResponse> responses) {
expected.set(1000000);
super.executeBulk(bulkRequest, startTimeNanos, listener, responses);
}
};
} else {
return new TestTransportBulkAction(
Settings.EMPTY,
threadPool,
transportService,
clusterService,
null,
createIndexAction,
actionFilters,
resolver,
null,
System::nanoTime) {
@Override
public void executeBulk(BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
long elapsed = spinForAtLeastOneMillisecond();
expected.set(elapsed);
super.executeBulk(bulkRequest, listener);
}

@Override
void executeBulk(
BulkRequest bulkRequest,
long startTimeNanos,
ActionListener<BulkResponse> listener,
AtomicArray<BulkItemResponse> responses) {
long elapsed = spinForAtLeastOneMillisecond();
expected.set(elapsed);
super.executeBulk(bulkRequest, startTimeNanos, listener, responses);
}
};
}
}

// test unit conversion with a controlled clock
public void testTookWithControlledClock() throws Exception {
runTestTook(true);
}

// test took advances with System#nanoTime
public void testTookWithRealClock() throws Exception {
runTestTook(false);
}

private void runTestTook(boolean controlled) throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
// translate Windows line endings (\r\n) to standard ones (\n)
if (Constants.WINDOWS) {
bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
}
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
AtomicLong expected = new AtomicLong();
TransportBulkAction action = createAction(controlled, expected);
action.doExecute(bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkItemResponses) {
if (controlled) {
assertThat(
bulkItemResponses.getTook().getMillis(),
equalTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS)));
} else {
assertThat(
bulkItemResponses.getTook().getMillis(),
greaterThanOrEqualTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS)));
}
}

@Override
public void onFailure(Throwable e) {

}
});
}

static class Resolver extends IndexNameExpressionResolver {
public Resolver(Settings settings) {
super(settings);
}

@Override
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
return request.indices();
}
}

static class TestTransportBulkAction extends TransportBulkAction {

public TestTransportBulkAction(
Settings settings,
ThreadPool threadPool,
TransportService transportService,
ClusterService clusterService,
TransportShardBulkAction shardBulkAction,
TransportCreateIndexAction createIndexAction,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex,
LongSupplier relativeTimeProvider) {
super(
settings,
threadPool,
transportService,
clusterService,
shardBulkAction,
createIndexAction,
actionFilters,
indexNameExpressionResolver,
autoCreateIndex,
relativeTimeProvider);
}

@Override
boolean needToCheck() {
return randomBoolean();
}

@Override
boolean shouldAutoCreate(String index, ClusterState state) {
return randomBoolean();
}

}

static class TestTransportCreateIndexAction extends TransportCreateIndexAction {

public TestTransportCreateIndexAction(
Settings settings,
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
MetaDataCreateIndexService createIndexService,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver);
}

@Override
protected void doExecute(Task task, CreateIndexRequest request, ActionListener<CreateIndexResponse> listener) {
listener.onResponse(newResponse());
}
}

}
@ -0,0 +1,226 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support.nodes;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeActionTests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Supplier;

public class TransportNodesActionTests extends ESTestCase {

private static ThreadPool THREAD_POOL;
private static ClusterName CLUSTER_NAME = new ClusterName("test-cluster");

private TestClusterService clusterService;
private CapturingTransport transport;
private TestTransportNodesAction action;

public void testRequestIsSentToEachNode() throws Exception {
TestNodesRequest request = new TestNodesRequest();
PlainActionFuture<TestNodesResponse> listener = new PlainActionFuture<>();
action.new AsyncAction(null, request, listener).start();
Map<String, List<CapturingTransport.CapturedRequest>> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear();
int numNodes = clusterService.state().getNodes().size();
// check a request was sent to the right number of nodes
assertEquals(numNodes, capturedRequests.size());
}

public void testNodesSelectors() {
int numSelectors = randomIntBetween(1, 5);
Set<String> nodeSelectors = new HashSet<>();
for (int i = 0; i < numSelectors; i++) {
nodeSelectors.add(randomFrom(NodeSelector.values()).selector);
}
int numNodeIds = randomIntBetween(0, 3);
String[] nodeIds = clusterService.state().nodes().nodes().keys().toArray(String.class);
for (int i = 0; i < numNodeIds; i++) {
String nodeId = randomFrom(nodeIds);
nodeSelectors.add(nodeId);
}
String[] finalNodesIds = nodeSelectors.toArray(new String[nodeSelectors.size()]);
TestNodesRequest request = new TestNodesRequest(finalNodesIds);
action.new AsyncAction(null, request, new PlainActionFuture<>()).start();
Map<String, List<CapturingTransport.CapturedRequest>> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear();
assertEquals(clusterService.state().nodes().resolveNodesIds(finalNodesIds).length, capturedRequests.size());
}

private enum NodeSelector {
LOCAL("_local"), ELECTED_MASTER("_master"), MASTER_ELIGIBLE("master:true"), DATA("data:true"), CUSTOM_ATTRIBUTE("attr:value");

private final String selector;

NodeSelector(String selector) {
this.selector = selector;
}
}

@BeforeClass
public static void startThreadPool() {
THREAD_POOL = new ThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName());
}

@AfterClass
public static void destroyThreadPool() {
ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS);
// since static must set to null to be eligible for collection
THREAD_POOL = null;
}

@Before
public void setUp() throws Exception {
super.setUp();
transport = new CapturingTransport();
clusterService = new TestClusterService(THREAD_POOL);
final TransportService transportService = new TransportService(transport, THREAD_POOL);
transportService.start();
transportService.acceptIncomingRequests();
int numNodes = randomIntBetween(3, 10);
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
List<DiscoveryNode> discoveryNodes = new ArrayList<>();
for (int i = 0; i < numNodes; i++) {
Map<String, String> attributes = new HashMap<>();
if (randomBoolean()) {
attributes.put("master", Boolean.toString(randomBoolean()));
attributes.put("data", Boolean.toString(randomBoolean()));
attributes.put("ingest", Boolean.toString(randomBoolean()));
} else {
attributes.put("client", "true");
}
if (frequently()) {
attributes.put("custom", randomBoolean() ? "match" : randomAsciiOfLengthBetween(3, 5));
}
final DiscoveryNode node = newNode(i, attributes);
discoBuilder = discoBuilder.put(node);
discoveryNodes.add(node);
}
discoBuilder.localNodeId(randomFrom(discoveryNodes).id());
discoBuilder.masterNodeId(randomFrom(discoveryNodes).id());
ClusterState.Builder stateBuilder = ClusterState.builder(CLUSTER_NAME);
stateBuilder.nodes(discoBuilder);
ClusterState clusterState = stateBuilder.build();
clusterService.setState(clusterState);
action = new TestTransportNodesAction(
Settings.EMPTY,
THREAD_POOL,
clusterService,
transportService,
new ActionFilters(Collections.emptySet()),
TestNodesRequest::new,
TestNodeRequest::new,
ThreadPool.Names.SAME
);
}

private static DiscoveryNode newNode(int nodeId, Map<String, String> attributes) {
String node = "node_" + nodeId;
return new DiscoveryNode(node, node, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
}

private static class TestTransportNodesAction extends TransportNodesAction<TestNodesRequest, TestNodesResponse, TestNodeRequest,
TestNodeResponse> {

TestTransportNodesAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService
transportService, ActionFilters actionFilters, Supplier<TestNodesRequest> request,
Supplier<TestNodeRequest> nodeRequest, String nodeExecutor) {
super(settings, "indices:admin/test", CLUSTER_NAME, threadPool, clusterService, transportService, actionFilters,
null, request, nodeRequest, nodeExecutor);
}

@Override
protected TestNodesResponse newResponse(TestNodesRequest request, AtomicReferenceArray nodesResponses) {
final List<TestNodeResponse> nodeResponses = new ArrayList<>();
for (int i = 0; i < nodesResponses.length(); i++) {
Object resp = nodesResponses.get(i);
if (resp instanceof TestNodeResponse) {
nodeResponses.add((TestNodeResponse) resp);
}
}
return new TestNodesResponse(nodeResponses);
}

@Override
protected TestNodeRequest newNodeRequest(String nodeId, TestNodesRequest request) {
return new TestNodeRequest();
}

@Override
protected TestNodeResponse newNodeResponse() {
return new TestNodeResponse();
}

@Override
protected TestNodeResponse nodeOperation(TestNodeRequest request) {
return new TestNodeResponse();
}

@Override
protected boolean accumulateExceptions() {
return false;
}
}

private static class TestNodesRequest extends BaseNodesRequest<TestNodesRequest> {
TestNodesRequest(String... nodesIds) {
super(nodesIds);
}
}

private static class TestNodesResponse extends BaseNodesResponse<TestNodeResponse> {

private final List<TestNodeResponse> nodeResponses;

TestNodesResponse(List<TestNodeResponse> nodeResponses) {
this.nodeResponses = nodeResponses;
}
}

private static class TestNodeRequest extends BaseNodeRequest {
}

private static class TestNodeResponse extends BaseNodeResponse {
}
}
@ -82,7 +82,7 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase {

public void addToMappings(XContentBuilder mappingsBuilder) throws IOException {
mappingsBuilder.startObject(name);
mappingsBuilder.field("type", "string");
mappingsBuilder.field("type", "text");
String tv_settings;
if (storedPositions && storedOffset && storedPayloads) {
tv_settings = "with_positions_offsets_payloads";
@ -0,0 +1,167 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import static org.hamcrest.CoreMatchers.containsString;

public class BootstrapCheckTests extends ESTestCase {

public void testNonProductionMode() {
// nothing should happen since we are in non-production mode
BootstrapCheck.check(Settings.EMPTY);
}

public void testFileDescriptorLimits() {
final boolean osX = randomBoolean(); // simulates OS X versus non-OS X
final int limit = osX ? 10240 : 1 << 16;
final AtomicLong maxFileDescriptorCount = new AtomicLong(randomIntBetween(1, limit - 1));
final BootstrapCheck.FileDescriptorCheck check;
if (osX) {
check = new BootstrapCheck.OsXFileDescriptorCheck() {
@Override
long getMaxFileDescriptorCount() {
return maxFileDescriptorCount.get();
}
};
} else {
check = new BootstrapCheck.FileDescriptorCheck() {
@Override
long getMaxFileDescriptorCount() {
return maxFileDescriptorCount.get();
}
};
}

try {
BootstrapCheck.check(true, Collections.singletonList(check));
fail("should have failed due to max file descriptors too low");
} catch (final RuntimeException e) {
assertThat(e.getMessage(), containsString("max file descriptors"));
}

maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

BootstrapCheck.check(true, Collections.singletonList(check));

// nothing should happen if current file descriptor count is
// not available
maxFileDescriptorCount.set(-1);
BootstrapCheck.check(true, Collections.singletonList(check));
}

public void testFileDescriptorLimitsThrowsOnInvalidLimit() {
final IllegalArgumentException e =
expectThrows(
IllegalArgumentException.class,
() -> new BootstrapCheck.FileDescriptorCheck(-randomIntBetween(0, Integer.MAX_VALUE)));
assertThat(e.getMessage(), containsString("limit must be positive but was"));
}

public void testMlockallCheck() {
class MlockallCheckTestCase {

private final boolean mlockallSet;
private final boolean isMemoryLocked;
private final boolean shouldFail;

public MlockallCheckTestCase(final boolean mlockallSet, final boolean isMemoryLocked, final boolean shouldFail) {
this.mlockallSet = mlockallSet;
this.isMemoryLocked = isMemoryLocked;
this.shouldFail = shouldFail;
}

}

final List<MlockallCheckTestCase> testCases = new ArrayList<>();
testCases.add(new MlockallCheckTestCase(true, true, false));
testCases.add(new MlockallCheckTestCase(true, false, true));
testCases.add(new MlockallCheckTestCase(false, true, false));
testCases.add(new MlockallCheckTestCase(false, false, false));

for (final MlockallCheckTestCase testCase : testCases) {
final BootstrapCheck.MlockallCheck check = new BootstrapCheck.MlockallCheck(testCase.mlockallSet) {
@Override
boolean isMemoryLocked() {
return testCase.isMemoryLocked;
}
};

if (testCase.shouldFail) {
try {
BootstrapCheck.check(true, Collections.singletonList(check));
fail("should have failed due to memory not being locked");
} catch (final RuntimeException e) {
assertThat(
e.getMessage(),
containsString("memory locking requested for elasticsearch process but memory is not locked"));
}
} else {
// nothing should happen
BootstrapCheck.check(true, Collections.singletonList(check));
}
}
}

public void testMaxNumberOfThreadsCheck() {
final int limit = 1 << 15;
final AtomicLong maxNumberOfThreads = new AtomicLong(randomIntBetween(1, limit - 1));
final BootstrapCheck.MaxNumberOfThreadsCheck check = new BootstrapCheck.MaxNumberOfThreadsCheck() {
@Override
long getMaxNumberOfThreads() {
return maxNumberOfThreads.get();
}
};

try {
BootstrapCheck.check(true, Collections.singletonList(check));
fail("should have failed due to max number of threads too low");
} catch (final RuntimeException e) {
assertThat(e.getMessage(), containsString("max number of threads"));
}

maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

BootstrapCheck.check(true, Collections.singletonList(check));

// nothing should happen if current max number of threads is
// not available
maxNumberOfThreads.set(-1);
BootstrapCheck.check(true, Collections.singletonList(check));
}

public void testEnforceLimits() {
final Set<Setting> enforceSettings = BootstrapCheck.enforceSettings();
final Setting setting = randomFrom(Arrays.asList(enforceSettings.toArray(new Setting[enforceSettings.size()])));
final Settings settings = Settings.builder().put(setting.getKey(), randomAsciiOfLength(8)).build();
assertTrue(BootstrapCheck.enforceLimits(settings));
}

}
@ -19,9 +19,7 @@

package org.elasticsearch.bootstrap;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.test.ESTestCase;

public class BootstrapSettingsTests extends ESTestCase {

@ -33,22 +31,4 @@ public class BootstrapSettingsTests extends ESTestCase {
assertTrue(BootstrapSettings.CTRLHANDLER_SETTING.get(Settings.EMPTY));
}

@AwaitsFix(bugUrl = "this feature is disabled for snapshot builds, for now - see #16835")
public void testEnforceMaxFileDescriptorLimits() {
// nothing should happen since we are in OOB mode
Bootstrap.enforceOrLogLimits(Settings.EMPTY);

Settings build = Settings.builder().put(randomFrom(Bootstrap.ENFORCE_SETTINGS.toArray(new Setting[0])).getKey(),
"127.0.0.1").build();
long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
try {
Bootstrap.enforceOrLogLimits(build);
if (maxFileDescriptorCount != -1 && maxFileDescriptorCount < (1 << 16)) {
fail("must have enforced limits: " + maxFileDescriptorCount);
}
} catch (IllegalStateException ex) {
assertTrue(ex.getMessage(), ex.getMessage().startsWith("max file descriptors"));
}
}

}
@ -20,7 +20,9 @@
package org.elasticsearch.bwcompat;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.SmallFloat;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;

@ -297,6 +299,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
importIndex(indexName);
assertIndexSanity(indexName, version);
assertBasicSearchWorks(indexName);
assertAllSearchWorks(indexName);
assertBasicAggregationWorks(indexName);
assertRealtimeGetWorks(indexName);
assertNewReplicasWork(indexName);

@ -354,6 +357,39 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
assertEquals(numDocs, searchRsp.getHits().getTotalHits());
}

boolean findPayloadBoostInExplanation(Explanation expl) {
if (expl.getDescription().startsWith("payloadBoost=") && expl.getValue() != 1f) {
return true;
} else {
boolean found = false;
for (Explanation sub : expl.getDetails()) {
found |= findPayloadBoostInExplanation(sub);
}
return found;
}
}

void assertAllSearchWorks(String indexName) {
logger.info("--> testing _all search");
SearchResponse searchRsp = client().prepareSearch(indexName).get();
ElasticsearchAssertions.assertNoFailures(searchRsp);
assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
SearchHit bestHit = searchRsp.getHits().getAt(0);

// Make sure there are payloads and they are taken into account for the score
// the 'string' field has a boost of 4 in the mappings so it should get a payload boost
String stringValue = (String) bestHit.sourceAsMap().get("string");
assertNotNull(stringValue);
Explanation explanation = client().prepareExplain(indexName, bestHit.getType(), bestHit.getId())
.setQuery(QueryBuilders.matchQuery("_all", stringValue)).get().getExplanation();
assertTrue("Could not find payload boost in explanation\n" + explanation, findPayloadBoostInExplanation(explanation));

// Make sure the query can run on the whole index
searchRsp = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("_all", stringValue)).setExplain(true).get();
ElasticsearchAssertions.assertNoFailures(searchRsp);
assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
}

void assertBasicAggregationWorks(String indexName) {
// histogram on a long
SearchResponse searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.histogram("histo").field("long_sort").interval(10)).get();
@ -129,7 +129,7 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
logger.info("--> start data node / non master node");
internalCluster().startNode(settingsBuilder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));

assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"string\" },\"field_b\" :{ \"type\" : \"string\" }}}}}}"));
assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"keyword\" },\"field_b\" :{ \"type\" : \"keyword\" }}}}}}"));
client().admin().indices().prepareAliases().addAlias("test", "a_test", QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"))).get();
}
}
@ -0,0 +1,176 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.node;

import org.elasticsearch.Version;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;

public class DiscoveryNodesTests extends ESTestCase {

public void testResolveNodeByIdOrName() {
DiscoveryNodes discoveryNodes = buildDiscoveryNodes();
DiscoveryNode[] nodes = discoveryNodes.nodes().values().toArray(DiscoveryNode.class);
DiscoveryNode node = randomFrom(nodes);
DiscoveryNode resolvedNode = discoveryNodes.resolveNode(randomBoolean() ? node.id() : node.name());
assertThat(resolvedNode.id(), equalTo(node.id()));
}

public void testResolveNodeByAttribute() {
DiscoveryNodes discoveryNodes = buildDiscoveryNodes();
NodeSelector nodeSelector = randomFrom(NodeSelector.values());
Set<String> matchingNodeIds = nodeSelector.matchingNodeIds(discoveryNodes);
try {
DiscoveryNode resolvedNode = discoveryNodes.resolveNode(nodeSelector.selector);
assertThat(matchingNodeIds.size(), equalTo(1));
assertThat(resolvedNode.id(), equalTo(matchingNodeIds.iterator().next()));
} catch(IllegalArgumentException e) {
if (matchingNodeIds.size() == 0) {
assertThat(e.getMessage(), equalTo("failed to resolve [" + nodeSelector.selector + "], no matching nodes"));
} else if (matchingNodeIds.size() > 1) {
assertThat(e.getMessage(), containsString("where expected to be resolved to a single node"));
} else {
fail("resolveNode shouldn't have failed for [" + nodeSelector.selector + "]");
}
}
}

public void testResolveNodesIds() {
DiscoveryNodes discoveryNodes = buildDiscoveryNodes();

int numSelectors = randomIntBetween(1, 5);
Set<String> nodeSelectors = new HashSet<>();
Set<String> expectedNodeIdsSet = new HashSet<>();
for (int i = 0; i < numSelectors; i++) {
NodeSelector nodeSelector = randomFrom(NodeSelector.values());
if (nodeSelectors.add(nodeSelector.selector)) {
expectedNodeIdsSet.addAll(nodeSelector.matchingNodeIds(discoveryNodes));
}
}
int numNodeIds = randomIntBetween(0, 3);
String[] nodeIds = discoveryNodes.nodes().keys().toArray(String.class);
for (int i = 0; i < numNodeIds; i++) {
String nodeId = randomFrom(nodeIds);
nodeSelectors.add(nodeId);
expectedNodeIdsSet.add(nodeId);
}
int numNodeNames = randomIntBetween(0, 3);
DiscoveryNode[] nodes = discoveryNodes.nodes().values().toArray(DiscoveryNode.class);
for (int i = 0; i < numNodeNames; i++) {
DiscoveryNode discoveryNode = randomFrom(nodes);
nodeSelectors.add(discoveryNode.name());
expectedNodeIdsSet.add(discoveryNode.id());
}

String[] resolvedNodesIds = discoveryNodes.resolveNodesIds(nodeSelectors.toArray(new String[nodeSelectors.size()]));
Arrays.sort(resolvedNodesIds);
String[] expectedNodesIds = expectedNodeIdsSet.toArray(new String[expectedNodeIdsSet.size()]);
Arrays.sort(expectedNodesIds);
assertThat(resolvedNodesIds, equalTo(expectedNodesIds));
}

private static DiscoveryNodes buildDiscoveryNodes() {
int numNodes = randomIntBetween(1, 10);
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
List<DiscoveryNode> nodesList = new ArrayList<>();
for (int i = 0; i < numNodes; i++) {
Map<String, String> attributes = new HashMap<>();
if (randomBoolean()) {
attributes.put("master", Boolean.toString(randomBoolean()));
attributes.put("data", Boolean.toString(randomBoolean()));
attributes.put("ingest", Boolean.toString(randomBoolean()));
} else {
attributes.put("client", "true");
}
if (frequently()) {
attributes.put("custom", randomBoolean() ? "match" : randomAsciiOfLengthBetween(3, 5));
}
final DiscoveryNode node = newNode(i, attributes);
discoBuilder = discoBuilder.put(node);
nodesList.add(node);
}
discoBuilder.localNodeId(randomFrom(nodesList).id());
discoBuilder.masterNodeId(randomFrom(nodesList).id());
return discoBuilder.build();
}

private static DiscoveryNode newNode(int nodeId, Map<String, String> attributes) {
return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
}

private enum NodeSelector {
LOCAL("_local") {
@Override
Set<String> matchingNodeIds(DiscoveryNodes nodes) {
return Collections.singleton(nodes.localNodeId());
}
}, ELECTED_MASTER("_master") {
@Override
Set<String> matchingNodeIds(DiscoveryNodes nodes) {
return Collections.singleton(nodes.masterNodeId());
}
}, MASTER_ELIGIBLE("master:true") {
@Override
Set<String> matchingNodeIds(DiscoveryNodes nodes) {
Set<String> ids = new HashSet<>();
nodes.getMasterNodes().keysIt().forEachRemaining(ids::add);
return ids;
}
}, DATA("data:true") {
@Override
Set<String> matchingNodeIds(DiscoveryNodes nodes) {
Set<String> ids = new HashSet<>();
nodes.getDataNodes().keysIt().forEachRemaining(ids::add);
return ids;
}
}, CUSTOM_ATTRIBUTE("attr:value") {
@Override
Set<String> matchingNodeIds(DiscoveryNodes nodes) {
Set<String> ids = new HashSet<>();
nodes.getNodes().valuesIt().forEachRemaining(node -> {
if ("value".equals(node.getAttributes().get("attr"))) {
ids.add(node.id());
}
});
return ids;
}
};

private final String selector;

NodeSelector(String selector) {
this.selector = selector;
}

abstract Set<String> matchingNodeIds(DiscoveryNodes nodes);
}
}
@ -48,7 +48,7 @@ public class CodecTests extends ESSingleNodeTestCase {

public void testAcceptPostingsFormat() throws IOException {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "string").field("postings_format", Codec.getDefault().postingsFormat().getName()).endObject().endObject()
.startObject("properties").startObject("field").field("type", "keyword").field("postings_format", Codec.getDefault().postingsFormat().getName()).endObject().endObject()
.endObject().endObject().string();
int i = 0;
for (Version v : VersionUtils.allVersions()) {

@ -75,7 +75,7 @@ public class CodecTests extends ESSingleNodeTestCase {

public void testAcceptDocValuesFormat() throws IOException {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "string").field("doc_values_format", Codec.getDefault().docValuesFormat().getName()).endObject().endObject()
.startObject("properties").startObject("field").field("type", "keyword").field("doc_values_format", Codec.getDefault().docValuesFormat().getName()).endObject().endObject()
.endObject().endObject().string();
int i = 0;
for (Version v : VersionUtils.allVersions()) {

@ -42,6 +42,7 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.test.ESTestCase;

@ -89,8 +90,8 @@ public class SimpleAllTests extends ESTestCase {
if (payload == null || payload.length == 0) {
assertEquals(boost, 1f, 0.001f);
} else {
assertEquals(4, payload.length);
final float b = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
assertEquals(1, payload.length);
final float b = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
assertEquals(boost, b, 0.001f);
}
}
@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.equalTo;

public class ScriptScoreFunctionTests extends ESTestCase {
/**
 * Tests https://github.com/elasticsearch/elasticsearch/issues/2426
 * Tests https://github.com/elastic/elasticsearch/issues/2426
 */
public void testScriptScoresReturnsNaN() throws IOException {
ScoreFunction scoreFunction = new ScriptScoreFunction(new Script("Float.NaN"), new FloatValueScript(Float.NaN));

@ -367,7 +367,7 @@ public class SettingTests extends ESTestCase {
}

public void testDynamicKeySetting() {
Setting<Boolean> setting = Setting.dynamicKeySetting("foo.", "false", Boolean::parseBoolean, SettingsProperty.ClusterScope);
Setting<Boolean> setting = Setting.prefixKeySetting("foo.", "false", Boolean::parseBoolean, SettingsProperty.ClusterScope);
assertTrue(setting.hasComplexMatcher());
assertTrue(setting.match("foo.bar"));
assertFalse(setting.match("foo"));

@ -379,7 +379,28 @@ public class SettingTests extends ESTestCase {
setting.getConcreteSetting("foo");
fail();
} catch (IllegalArgumentException ex) {
assertEquals("key must match setting but didn't [foo]", ex.getMessage());
assertEquals("key [foo] must match [foo.] but didn't.", ex.getMessage());
}
}

public void testAdfixKeySetting() {
Setting<Boolean> setting = Setting.adfixKeySetting("foo", "enable", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER);
assertTrue(setting.hasComplexMatcher());
assertTrue(setting.match("foo.bar.enable"));
assertTrue(setting.match("foo.baz.enable"));
assertTrue(setting.match("foo.bar.baz.enable"));
assertFalse(setting.match("foo.bar"));
assertFalse(setting.match("foo.bar.baz.enabled"));
assertFalse(setting.match("foo"));
Setting<Boolean> concreteSetting = setting.getConcreteSetting("foo.bar.enable");
assertTrue(concreteSetting.get(Settings.builder().put("foo.bar.enable", "true").build()));
assertFalse(concreteSetting.get(Settings.builder().put("foo.baz.enable", "true").build()));

try {
setting.getConcreteSetting("foo");
fail();
} catch (IllegalArgumentException ex) {
assertEquals("key [foo] must match [foo*enable.] but didn't.", ex.getMessage());
}
}
@ -46,20 +46,13 @@ public class PrioritizedRunnableTests extends ESTestCase {

// test age advances with System#nanoTime
public void testGetAgeInMillisWithRealClock() throws InterruptedException {
long nanosecondsInMillisecond = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
PrioritizedRunnable runnable = new PrioritizedRunnable(Priority.NORMAL) {
@Override
public void run() {
}
};

// force at least one millisecond to elapse, but ensure the
// clock has enough resolution to observe the passage of time
long start = System.nanoTime();
long elapsed;
while ((elapsed = (System.nanoTime() - start)) < nanosecondsInMillisecond) {
// busy spin
}
long elapsed = spinForAtLeastOneMillisecond();

// creation happened before start, so age will be at least as
// large as elapsed
@ -198,7 +198,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {

/**
 * Test that no split brain occurs under partial network partition. See https://github.com/elasticsearch/elasticsearch/issues/2488
 * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488
 */
public void testFailWithMinimumMasterNodesConfigured() throws Exception {
List<String> nodes = startCluster(3);

@ -285,7 +285,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("--> one node is closed - start indexing data into the second one");
client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
// TODO: remove once refresh doesn't fail immediately if there a master block:
// https://github.com/elasticsearch/elasticsearch/issues/9997
// https://github.com/elastic/elasticsearch/issues/9997
client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
client.admin().indices().prepareRefresh().execute().actionGet();

@ -751,7 +751,7 @@ public class GetActionIT extends ESIntegTestCase {
.startObject("field1").field("type", "object").startObject("properties")
.startObject("field2").field("type", "object").startObject("properties")
.startObject("field3").field("type", "object").startObject("properties")
.startObject("field4").field("type", "string").field("store", true)
.startObject("field4").field("type", "text").field("store", true)
.endObject().endObject()
.endObject().endObject()
.endObject().endObject()
@@ -19,6 +19,8 @@

package org.elasticsearch.index.engine;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;

@@ -126,6 +128,7 @@ import static java.util.Collections.emptyMap;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.everyItem;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.not;

@@ -293,8 +296,8 @@ public class InternalEngineTests extends ESTestCase {
Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
List<Segment> segments = engine.segments(false);
assertThat(segments.isEmpty(), equalTo(true));
assertThat(engine.segmentsStats().getCount(), equalTo(0L));
assertThat(engine.segmentsStats().getMemoryInBytes(), equalTo(0L));
assertThat(engine.segmentsStats(false).getCount(), equalTo(0L));
assertThat(engine.segmentsStats(false).getMemoryInBytes(), equalTo(0L));

// create a doc and refresh
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);

@@ -306,7 +309,7 @@ public class InternalEngineTests extends ESTestCase {

segments = engine.segments(false);
assertThat(segments.size(), equalTo(1));
SegmentsStats stats = engine.segmentsStats();
SegmentsStats stats = engine.segmentsStats(false);
assertThat(stats.getCount(), equalTo(1L));
assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L));

@@ -324,7 +327,7 @@ public class InternalEngineTests extends ESTestCase {

segments = engine.segments(false);
assertThat(segments.size(), equalTo(1));
assertThat(engine.segmentsStats().getCount(), equalTo(1L));
assertThat(engine.segmentsStats(false).getCount(), equalTo(1L));
assertThat(segments.get(0).isCommitted(), equalTo(true));
assertThat(segments.get(0).isSearch(), equalTo(true));
assertThat(segments.get(0).getNumDocs(), equalTo(2));

@@ -337,12 +340,12 @@ public class InternalEngineTests extends ESTestCase {

segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));
assertThat(engine.segmentsStats().getCount(), equalTo(2L));
assertThat(engine.segmentsStats().getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
assertThat(engine.segmentsStats().getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
assertThat(engine.segmentsStats().getTermVectorsMemoryInBytes(), equalTo(0L));
assertThat(engine.segmentsStats().getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
assertThat(engine.segmentsStats().getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
assertThat(engine.segmentsStats(false).getCount(), equalTo(2L));
assertThat(engine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
assertThat(engine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
assertThat(engine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L));
assertThat(engine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
assertThat(engine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
assertThat(segments.get(0).isCommitted(), equalTo(true));
assertThat(segments.get(0).isSearch(), equalTo(true));

@@ -363,7 +366,7 @@ public class InternalEngineTests extends ESTestCase {

segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));
assertThat(engine.segmentsStats().getCount(), equalTo(2L));
assertThat(engine.segmentsStats(false).getCount(), equalTo(2L));
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
assertThat(segments.get(0).isCommitted(), equalTo(true));
assertThat(segments.get(0).isSearch(), equalTo(true));

@@ -384,7 +387,7 @@ public class InternalEngineTests extends ESTestCase {

segments = engine.segments(false);
assertThat(segments.size(), equalTo(3));
assertThat(engine.segmentsStats().getCount(), equalTo(3L));
assertThat(engine.segmentsStats(false).getCount(), equalTo(3L));
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
assertThat(segments.get(0).isCommitted(), equalTo(true));
assertThat(segments.get(0).isSearch(), equalTo(true));

@@ -487,6 +490,29 @@ public class InternalEngineTests extends ESTestCase {
}
}

public void testSegmentsStatsIncludingFileSizes() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
assertThat(engine.segmentsStats(true).getFileSizes().size(), equalTo(0));

ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
engine.index(new Engine.Index(newUid("1"), doc));
engine.refresh("test");

SegmentsStats stats = engine.segmentsStats(true);
assertThat(stats.getFileSizes().size(), greaterThan(0));
assertThat((Iterable<Long>) () -> stats.getFileSizes().valuesIt(), everyItem(greaterThan(0L)));

ObjectObjectCursor<String, Long> firstEntry = stats.getFileSizes().iterator().next();

ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null);
engine.index(new Engine.Index(newUid("2"), doc2));
engine.refresh("test");

assertThat(engine.segmentsStats(true).getFileSizes().get(firstEntry.key), greaterThan(firstEntry.value));
}
}

public void testCommitStats() {
Document document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));

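Note: the new testSegmentsStatsIncludingFileSizes test above exercises the per-file sizes exposed by segmentsStats(true). A minimal, hypothetical sketch of reading that map outside the test, assuming an already-open Engine instance and assuming getFileSizes() returns an hppc-style map iterable as ObjectObjectCursor<String, Long> entries (which is what the test itself relies on):

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.SegmentsStats;

// Sketch: print each tracked segment file name and its size in bytes.
// "engine" is assumed to be an open Engine instance; construction is not shown here.
static void printSegmentFileSizes(Engine engine) {
    SegmentsStats stats = engine.segmentsStats(true); // true => collect per-file sizes
    for (ObjectObjectCursor<String, Long> entry : stats.getFileSizes()) {
        System.out.println(entry.key + " -> " + entry.value + " bytes");
    }
}
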
@ -276,8 +276,8 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
primaryEngine = createInternalEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE);
|
||||
List<Segment> segments = primaryEngine.segments(false);
|
||||
assertThat(segments.isEmpty(), equalTo(true));
|
||||
assertThat(primaryEngine.segmentsStats().getCount(), equalTo(0L));
|
||||
assertThat(primaryEngine.segmentsStats().getMemoryInBytes(), equalTo(0L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(0L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getMemoryInBytes(), equalTo(0L));
|
||||
|
||||
// create a doc and refresh
|
||||
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
|
||||
|
@ -289,7 +289,7 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
|
||||
segments = primaryEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(1));
|
||||
SegmentsStats stats = primaryEngine.segmentsStats();
|
||||
SegmentsStats stats = primaryEngine.segmentsStats(false);
|
||||
assertThat(stats.getCount(), equalTo(1L));
|
||||
assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L));
|
||||
assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L));
|
||||
|
@ -306,7 +306,7 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
// Check that the replica sees nothing
|
||||
segments = replicaEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(0));
|
||||
stats = replicaEngine.segmentsStats();
|
||||
stats = replicaEngine.segmentsStats(false);
|
||||
assertThat(stats.getCount(), equalTo(0L));
|
||||
assertThat(stats.getTermsMemoryInBytes(), equalTo(0L));
|
||||
assertThat(stats.getStoredFieldsMemoryInBytes(), equalTo(0L));
|
||||
|
@ -323,7 +323,7 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
// Check that the primary AND replica sees segments now
|
||||
segments = primaryEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(1));
|
||||
assertThat(primaryEngine.segmentsStats().getCount(), equalTo(1L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(1L));
|
||||
assertThat(segments.get(0).isCommitted(), equalTo(true));
|
||||
assertThat(segments.get(0).isSearch(), equalTo(true));
|
||||
assertThat(segments.get(0).getNumDocs(), equalTo(2));
|
||||
|
@ -332,7 +332,7 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
|
||||
segments = replicaEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(1));
|
||||
assertThat(replicaEngine.segmentsStats().getCount(), equalTo(1L));
|
||||
assertThat(replicaEngine.segmentsStats(false).getCount(), equalTo(1L));
|
||||
assertThat(segments.get(0).isCommitted(), equalTo(true));
|
||||
assertThat(segments.get(0).isSearch(), equalTo(true));
|
||||
assertThat(segments.get(0).getNumDocs(), equalTo(2));
|
||||
|
@ -346,12 +346,12 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
|
||||
segments = primaryEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(2));
|
||||
assertThat(primaryEngine.segmentsStats().getCount(), equalTo(2L));
|
||||
assertThat(primaryEngine.segmentsStats().getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats().getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats().getTermVectorsMemoryInBytes(), equalTo(0L));
|
||||
assertThat(primaryEngine.segmentsStats().getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats().getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(2L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
|
||||
assertThat(primaryEngine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
|
||||
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
|
||||
assertThat(segments.get(0).isCommitted(), equalTo(true));
|
||||
assertThat(segments.get(0).isSearch(), equalTo(true));
|
||||
|
@ -370,12 +370,12 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
|
||||
segments = replicaEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(2));
|
||||
assertThat(replicaEngine.segmentsStats().getCount(), equalTo(2L));
|
||||
assertThat(replicaEngine.segmentsStats().getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats().getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats().getTermVectorsMemoryInBytes(), equalTo(0L));
|
||||
assertThat(replicaEngine.segmentsStats().getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats().getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats(false).getCount(), equalTo(2L));
|
||||
assertThat(replicaEngine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L));
|
||||
assertThat(replicaEngine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
|
||||
assertThat(replicaEngine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
|
||||
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
|
||||
assertThat(segments.get(0).isCommitted(), equalTo(true));
|
||||
assertThat(segments.get(0).isSearch(), equalTo(true));
|
||||
|
@ -393,7 +393,7 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
|
||||
segments = primaryEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(2));
|
||||
assertThat(primaryEngine.segmentsStats().getCount(), equalTo(2L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(2L));
|
||||
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
|
||||
assertThat(segments.get(0).isCommitted(), equalTo(true));
|
||||
assertThat(segments.get(0).isSearch(), equalTo(true));
|
||||
|
@ -416,7 +416,7 @@ public class ShadowEngineTests extends ESTestCase {
|
|||
|
||||
segments = primaryEngine.segments(false);
|
||||
assertThat(segments.size(), equalTo(3));
|
||||
assertThat(primaryEngine.segmentsStats().getCount(), equalTo(3L));
|
||||
assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(3L));
|
||||
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
|
||||
assertThat(segments.get(0).isCommitted(), equalTo(true));
|
||||
assertThat(segments.get(0).isSearch(), equalTo(true));
|
||||
|
|
|
@ -135,7 +135,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
|
|||
|
||||
@Before
|
||||
public void setup() throws Exception {
|
||||
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
|
||||
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_0); // we need 2.x so that fielddata is allowed on string fields
|
||||
Settings settings = Settings.builder().put("index.fielddata.cache", "none")
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
|
||||
indexService = createIndex("test", settings);
|
||||
|
|
|
@ -27,15 +27,12 @@ import org.apache.lucene.index.DirectoryReader;
|
|||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.fielddata.plain.PagedBytesAtomicFieldData;
|
||||
import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
|
||||
import org.elasticsearch.index.fielddata.plain.SortedNumericDVIndexFieldData;
|
||||
import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
|
@ -46,33 +43,38 @@ import org.elasticsearch.index.mapper.core.ByteFieldMapper;
|
|||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.ShortFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.TextFieldMapper;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.test.IndexSettingsModule;
|
||||
import org.elasticsearch.test.InternalSettingsPlugin;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.Set;
|
||||
import java.util.Collection;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
|
||||
public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> getPlugins() {
|
||||
return pluginList(InternalSettingsPlugin.class);
|
||||
}
|
||||
|
||||
public void testGetForFieldDefaults() {
|
||||
final IndexService indexService = createIndex("test");
|
||||
final IndexFieldDataService ifdService = indexService.fieldData();
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType stringMapper = new StringFieldMapper.Builder("string").tokenized(false).build(ctx).fieldType();
|
||||
final MappedFieldType stringMapper = new KeywordFieldMapper.Builder("string").build(ctx).fieldType();
|
||||
ifdService.clear();
|
||||
IndexFieldData<?> fd = ifdService.getForField(stringMapper);
|
||||
assertTrue(fd instanceof SortedSetDVOrdinalsIndexFieldData);
|
||||
|
@ -99,36 +101,6 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
assertTrue(fd instanceof SortedNumericDVIndexFieldData);
|
||||
}
|
||||
|
||||
public void testChangeFieldDataFormat() throws Exception {
|
||||
final IndexService indexService = createIndex("test");
|
||||
final IndexFieldDataService ifdService = indexService.fieldData();
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType mapper1 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("s", "thisisastring", Store.NO));
|
||||
writer.addDocument(doc);
|
||||
final IndexReader reader1 = DirectoryReader.open(writer, true);
|
||||
IndexFieldData<?> ifd = ifdService.getForField(mapper1);
|
||||
assertThat(ifd, instanceOf(PagedBytesIndexFieldData.class));
|
||||
Set<LeafReader> oldSegments = Collections.newSetFromMap(new IdentityHashMap<LeafReader, Boolean>());
|
||||
for (LeafReaderContext arc : reader1.leaves()) {
|
||||
oldSegments.add(arc.reader());
|
||||
AtomicFieldData afd = ifd.load(arc);
|
||||
assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
|
||||
}
|
||||
// write new segment
|
||||
writer.addDocument(doc);
|
||||
final IndexReader reader2 = DirectoryReader.open(writer, true);
|
||||
final MappedFieldType mapper2 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "doc_values").build()).build(ctx).fieldType();
|
||||
ifd = ifdService.getForField(mapper2);
|
||||
assertThat(ifd, instanceOf(SortedSetDVOrdinalsIndexFieldData.class));
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
writer.close();
|
||||
writer.getDirectory().close();
|
||||
}
|
||||
|
||||
public void testFieldDataCacheListener() throws Exception {
|
||||
final IndexService indexService = createIndex("test");
|
||||
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
|
@ -137,7 +109,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());
|
||||
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType mapper1 = new StringFieldMapper.Builder("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
final MappedFieldType mapper1 = new TextFieldMapper.Builder("s").build(ctx).fieldType();
|
||||
final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("s", "thisisastring", Store.NO));
|
||||
|
|
|
@ -242,7 +242,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
// original mapping not modified
|
||||
assertEquals(mapping, serialize(mapper));
|
||||
// but we have an update
|
||||
assertEquals("{\"type\":{\"properties\":{\"foo\":{\"type\":\"string\"}}}}", serialize(update));
|
||||
assertEquals("{\"type\":{\"properties\":{\"foo\":{\"type\":\"text\"}}}}", serialize(update));
|
||||
}
|
||||
|
||||
public void testIncremental() throws Exception {
|
||||
|
@ -264,7 +264,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
// but we have an update
|
||||
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
// foo is NOT in the update
|
||||
.startObject("bar").field("type", "string").endObject()
|
||||
.startObject("bar").field("type", "text").endObject()
|
||||
.endObject().endObject().string(), serialize(update));
|
||||
}
|
||||
|
||||
|
@ -284,8 +284,8 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
assertEquals(mapping, serialize(mapper));
|
||||
// but we have an update
|
||||
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("bar").field("type", "string").endObject()
|
||||
.startObject("foo").field("type", "string").endObject()
|
||||
.startObject("bar").field("type", "text").endObject()
|
||||
.startObject("foo").field("type", "text").endObject()
|
||||
.endObject().endObject().string(), serialize(update));
|
||||
}
|
||||
|
||||
|
@ -305,7 +305,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
assertEquals(mapping, serialize(mapper));
|
||||
// but we have an update
|
||||
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "string").endObject().endObject().endObject().endObject().endObject()
|
||||
.startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text").endObject().endObject().endObject().endObject().endObject()
|
||||
.endObject().endObject().endObject().string(), serialize(update));
|
||||
}
|
||||
|
||||
|
@ -325,7 +325,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
assertEquals(mapping, serialize(mapper));
|
||||
// but we have an update
|
||||
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("foo").field("type", "string").endObject()
|
||||
.startObject("foo").field("type", "text").endObject()
|
||||
.endObject().endObject().endObject().string(), serialize(update));
|
||||
}
|
||||
|
||||
|
@ -345,7 +345,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
assertEquals(mapping, serialize(mapper));
|
||||
// but we have an update
|
||||
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "string").endObject().endObject().endObject().endObject().endObject()
|
||||
.startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text").endObject().endObject().endObject().endObject().endObject()
|
||||
.endObject().endObject().endObject().string(), serialize(update));
|
||||
}
|
||||
|
||||
|
@ -366,7 +366,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
|
|||
assertEquals(mapping, serialize(mapper));
|
||||
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("foo").startObject("properties")
|
||||
.startObject("bar").field("type", "string").endObject()
|
||||
.startObject("bar").field("type", "text").endObject()
|
||||
.startObject("baz").field("type", "long").endObject()
|
||||
.endObject().endObject()
|
||||
.endObject().endObject().endObject().string(), serialize(update));
|
||||
|
|
|
@ -366,7 +366,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// related to https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// related to https://github.com/elastic/elasticsearch/issues/5864
|
||||
public void testMistypedTypeInRoot() throws IOException {
|
||||
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json");
|
||||
try {
|
||||
|
@ -378,7 +378,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// issue https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// issue https://github.com/elastic/elasticsearch/issues/5864
|
||||
public void testMisplacedMappingAsRoot() throws IOException {
|
||||
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json");
|
||||
try {
|
||||
|
@ -390,7 +390,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// issue https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// issue https://github.com/elastic/elasticsearch/issues/5864
|
||||
// test that RootObjectMapping still works
|
||||
public void testRootObjectMapperPropertiesDoNotCauseException() throws IOException {
|
||||
DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
|
||||
|
@ -404,7 +404,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
parser.parse("test", new CompressedXContent(mapping));
|
||||
}
|
||||
|
||||
// issue https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// issue https://github.com/elastic/elasticsearch/issues/5864
|
||||
public void testMetadataMappersStillWorking() throws MapperParsingException, IOException {
|
||||
String mapping = "{";
|
||||
Map<String, String> rootTypes = new HashMap<>();
|
||||
|
|
|
@ -112,7 +112,7 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase {
|
|||
.startObject("test")
|
||||
.startObject("properties")
|
||||
.startObject("foo")
|
||||
.field("type", "string")
|
||||
.field("type", "text")
|
||||
.field("store", storeCountedFields)
|
||||
.field("analyzer", "simple")
|
||||
.startObject("fields")
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.apache.lucene.search.NumericRangeQuery;
|
|||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
|
@ -48,11 +47,10 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
|
|||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.core.DateFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.TextFieldMapper;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.test.TestSearchContext;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
import org.junit.Before;
|
||||
|
@ -106,11 +104,11 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase {
|
|||
assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
|
||||
|
||||
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
|
||||
assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
|
||||
assertThat(fieldMapper, instanceOf(TextFieldMapper.class));
|
||||
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date2");
|
||||
assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
|
||||
assertThat(fieldMapper, instanceOf(TextFieldMapper.class));
|
||||
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date3");
|
||||
assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
|
||||
assertThat(fieldMapper, instanceOf(TextFieldMapper.class));
|
||||
}
|
||||
|
||||
public void testParseLocal() {
|
||||
|
|
|
@ -45,7 +45,7 @@ public class SimpleDynamicTemplatesTests extends ESSingleNodeTestCase {
|
|||
XContentBuilder builder = JsonXContent.contentBuilder();
|
||||
builder.startObject().startObject("person").startArray("dynamic_templates").startObject().startObject("test")
|
||||
.field("match_mapping_type", "string")
|
||||
.startObject("mapping").field("index", "no").endObject()
|
||||
.startObject("mapping").field("index", false).endObject()
|
||||
.endObject().endObject().endArray().endObject().endObject();
|
||||
IndexService index = createIndex("test");
|
||||
client().admin().indices().preparePutMapping("test").setType("person").setSource(builder.string()).get();
|
||||
|
|
|
@ -140,7 +140,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase {
|
|||
IndexService indexService = createIndex("test");
|
||||
|
||||
DocumentMapper builderDocMapper = new DocumentMapper.Builder(new RootObjectMapper.Builder("person").add(
|
||||
new StringFieldMapper.Builder("name").store(true)
|
||||
new TextFieldMapper.Builder("name").store(true)
|
||||
.addMultiField(new TextFieldMapper.Builder("indexed").index(true).tokenized(true))
|
||||
.addMultiField(new TextFieldMapper.Builder("not_indexed").index(false).store(true))
|
||||
), indexService.mapperService()).build(indexService.mapperService());
|
||||
|
|
|
@ -35,7 +35,7 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
public class NullValueTests extends ESSingleNodeTestCase {
|
||||
public void testNullNullValue() throws Exception {
|
||||
IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
|
||||
String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "string", "boolean", "byte"};
|
||||
String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte"};
|
||||
|
||||
for (String type : typesToTest) {
|
||||
String mapping = XContentFactory.jsonBuilder()
|
||||
|
|
|
@ -40,7 +40,7 @@ import org.elasticsearch.index.mapper.ParsedDocument;
|
|||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.TextFieldMapper;
|
||||
import org.elasticsearch.index.mapper.string.SimpleStringMappingTests;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
@ -111,10 +111,10 @@ public class SimpleNumericTests extends ESSingleNodeTestCase {
|
|||
|
||||
defaultMapper = index.mapperService().documentMapper("type");
|
||||
FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
|
||||
assertThat(mapper, instanceOf(StringFieldMapper.class));
|
||||
assertThat(mapper, instanceOf(TextFieldMapper.class));
|
||||
|
||||
mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
|
||||
assertThat(mapper, instanceOf(StringFieldMapper.class));
|
||||
assertThat(mapper, instanceOf(TextFieldMapper.class));
|
||||
}
|
||||
|
||||
public void testIgnoreMalformedOption() throws Exception {
|
||||
|
|
|
@ -47,7 +47,6 @@ import org.elasticsearch.index.mapper.core.StringFieldMapper.Builder;
|
|||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.test.InternalSettingsPlugin;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -75,7 +74,9 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase {
|
|||
|
||||
@Before
|
||||
public void before() {
|
||||
indexService = createIndex("test");
|
||||
indexService = createIndex("test",
|
||||
// we need 2.x since string is deprecated in 5.0
|
||||
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build());
|
||||
parser = indexService.mapperService().documentMapperParser();
|
||||
}
|
||||
|
||||
|
|
|
@ -20,14 +20,20 @@
|
|||
package org.elasticsearch.index.mapper.string;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.test.InternalSettingsPlugin;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
|
@ -38,6 +44,12 @@ import static org.hamcrest.Matchers.containsString;
|
|||
* expected in queries.
|
||||
*/
|
||||
public class StringFieldMapperPositionIncrementGapTests extends ESSingleNodeTestCase {
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> getPlugins() {
|
||||
return pluginList(InternalSettingsPlugin.class);
|
||||
}
|
||||
|
||||
/**
|
||||
* The default position_increment_gap should be large enough that most
|
||||
* "sensible" queries phrase slops won't match across values.
|
||||
|
@ -106,11 +118,12 @@ public class StringFieldMapperPositionIncrementGapTests extends ESSingleNodeTest
|
|||
* strange but not worth breaking some thought.
|
||||
*/
|
||||
public void testDefaultDefaultsToAnalyzer() throws IOException {
|
||||
XContentBuilder settings = XContentFactory.jsonBuilder().startObject().startObject("analysis").startObject("analyzer")
|
||||
.startObject("gappy");
|
||||
settings.field("type", "custom");
|
||||
settings.field("tokenizer", "standard");
|
||||
settings.field("position_increment_gap", 2);
|
||||
Settings settings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0)
|
||||
.put("analysis.analyzer.gappy.type", "custom")
|
||||
.put("analysis.analyzer.gappy.tokenizer", "standard")
|
||||
.put("analysis.analyzer.gappy.position_increment_gap", "2")
|
||||
.build();
|
||||
setupAnalyzer(settings, "gappy");
|
||||
testGap(client(), "test", "test", 2);
|
||||
}
|
||||
|
@ -123,7 +136,10 @@ public class StringFieldMapperPositionIncrementGapTests extends ESSingleNodeTest
|
|||
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").startObject("string");
|
||||
mapping.field("type", "string");
|
||||
mapping.field("position_increment_gap", positionIncrementGap);
|
||||
client().admin().indices().prepareCreate("test").addMapping("test", mapping).get();
|
||||
client().admin().indices().prepareCreate("test")
|
||||
.setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build())
|
||||
.addMapping("test", mapping)
|
||||
.get();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -131,11 +147,14 @@ public class StringFieldMapperPositionIncrementGapTests extends ESSingleNodeTest
|
|||
* named "string" that uses the specified analyzer and default
|
||||
* position_increment_gap.
|
||||
*/
|
||||
private void setupAnalyzer(XContentBuilder settings, String analyzer) throws IOException {
|
||||
private void setupAnalyzer(Settings settings, String analyzer) throws IOException {
|
||||
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").startObject("string");
|
||||
mapping.field("type", "string");
|
||||
mapping.field("analyzer", analyzer);
|
||||
client().admin().indices().prepareCreate("test").addMapping("test", mapping).setSettings(settings).get();
|
||||
client().admin().indices().prepareCreate("test")
|
||||
.addMapping("test", mapping)
|
||||
.setSettings(settings)
|
||||
.get();
|
||||
}
|
||||
|
||||
private static void testGap(Client client, String indexName, String type, int positionIncrementGap) throws IOException {
|
||||
|
|
|
@ -185,7 +185,7 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
|
|||
}
|
||||
}
|
||||
|
||||
// https://github.com/elasticsearch/elasticsearch/issues/7240
|
||||
// https://github.com/elastic/elasticsearch/issues/7240
|
||||
public void testEmptyBooleanQuery() throws Exception {
|
||||
XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
|
||||
BytesReference query = contentBuilder.startObject().startObject("bool").endObject().endObject().bytes();
|
||||
|
|
|
@ -73,6 +73,8 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
|
|||
DATE_FIELD_NAME, "type=date",
|
||||
OBJECT_FIELD_NAME, "type=object"
|
||||
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
|
||||
mapperService.merge("just_a_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("just_a_type"
|
||||
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -132,20 +134,26 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
|
|||
}
|
||||
}
|
||||
|
||||
public void testIllegalValues() {
|
||||
public void testIllegalValues() throws IOException {
|
||||
QueryBuilder query = RandomQueryBuilder.createQuery(random());
|
||||
try {
|
||||
new HasParentQueryBuilder(null, query);
|
||||
fail("must not be null");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
new HasParentQueryBuilder("foo", null);
|
||||
fail("must not be null");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
}
|
||||
|
||||
QueryShardContext context = createShardContext();
|
||||
HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder("just_a_type", new MatchAllQueryBuilder());
|
||||
try {
|
||||
queryBuilder.doToQuery(context);
|
||||
} catch (QueryShardException e) {
|
||||
assertThat(e.getMessage(), equalTo("[has_parent] no child types found for type [just_a_type]"));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -196,22 +196,19 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuil
|
|||
assertTrue(numericRangeQuery.includesMax());
|
||||
|
||||
double value;
|
||||
double width = 0;
|
||||
try {
|
||||
double width;
|
||||
if (queryBuilder.fieldName().equals(DATE_FIELD_NAME) == false) {
|
||||
value = Double.parseDouble(queryBuilder.value().toString());
|
||||
} catch (NumberFormatException e) {
|
||||
// Maybe its a date
|
||||
value = ISODateTimeFormat.dateTimeParser().parseMillis(queryBuilder.value().toString());
|
||||
width = queryBuilder.fuzziness().asTimeValue().getMillis();
|
||||
}
|
||||
|
||||
if (width == 0) {
|
||||
if (queryBuilder.fuzziness().equals(Fuzziness.AUTO)) {
|
||||
width = 1;
|
||||
} else {
|
||||
width = queryBuilder.fuzziness().asDouble();
|
||||
}
|
||||
} else {
|
||||
value = ISODateTimeFormat.dateTimeParser().parseMillis(queryBuilder.value().toString());
|
||||
width = queryBuilder.fuzziness().asTimeValue().getMillis();
|
||||
}
|
||||
|
||||
assertEquals(value - width, numericRangeQuery.getMin().doubleValue(), width * .1);
|
||||
assertEquals(value + width, numericRangeQuery.getMax().doubleValue(), width * .1);
|
||||
}
|
||||
|
|
|
@ -54,10 +54,10 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
|
|||
" \"name\":{\n" +
|
||||
" \"properties\":{\n" +
|
||||
" \"first\": {\n" +
|
||||
" \"type\":\"string\"\n" +
|
||||
" \"type\":\"text\"\n" +
|
||||
" }," +
|
||||
" \"last\": {\n" +
|
||||
" \"type\":\"string\"\n" +
|
||||
" \"type\":\"text\"\n" +
|
||||
" }" +
|
||||
" }" +
|
||||
" }\n" +
|
||||
|
|
|
@@ -109,6 +109,8 @@ public class FileInfoTests extends ESTestCase {
builder.field(Fields.NAME, name);
builder.field(Fields.PHYSICAL_NAME, physicalName);
builder.field(Fields.LENGTH, length);
builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
builder.field(Fields.CHECKSUM, "666");
builder.endObject();
byte[] xContent = builder.bytes().toBytes();

@@ -122,9 +124,9 @@ public class FileInfoTests extends ESTestCase {
assertThat(name, equalTo(parsedInfo.name()));
assertThat(physicalName, equalTo(parsedInfo.physicalName()));
assertThat(length, equalTo(parsedInfo.length()));
assertNull(parsedInfo.checksum());
assertNull(parsedInfo.metadata().checksum());
assertNull(parsedInfo.metadata().writtenBy());
assertEquals("666", parsedInfo.checksum());
assertEquals("666", parsedInfo.metadata().checksum());
assertEquals(Version.LATEST, parsedInfo.metadata().writtenBy());
} else {
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(xContent)) {
parser.nextToken();

@@ -139,14 +141,14 @@ public class FileInfoTests extends ESTestCase {
}

public void testGetPartSize() {
BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36), new ByteSizeValue(6));
BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666"), new ByteSizeValue(6));
int numBytes = 0;
for (int i = 0; i < info.numberOfParts(); i++) {
numBytes += info.partBytes(i);
}
assertEquals(numBytes, 36);

info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35), new ByteSizeValue(6));
info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6));
numBytes = 0;
for (int i = 0; i < info.numberOfParts(); i++) {
numBytes += info.partBytes(i);

@@ -154,7 +156,7 @@ public class FileInfoTests extends ESTestCase {
assertEquals(numBytes, 35);
final int numIters = randomIntBetween(10, 100);
for (int j = 0; j < numIters; j++) {
StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000));
StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666");
info = new BlobStoreIndexShardSnapshot.FileInfo("foo", metaData, new ByteSizeValue(randomIntBetween(1, 1000)));
numBytes = 0;
for (int i = 0; i < info.numberOfParts(); i++) {

@@ -69,7 +69,7 @@ public class ExceptionRetryIT extends ESIntegTestCase {
/**
 * Tests retry mechanism when indexing. If an exception occurs when indexing then the indexing request is tried again before finally failing.
 * If auto generated ids are used this must not lead to duplicate ids
 * see https://github.com/elasticsearch/elasticsearch/issues/8788
 * see https://github.com/elastic/elasticsearch/issues/8788
 */
public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
final AtomicBoolean exceptionThrown = new AtomicBoolean(false);

@ -0,0 +1,76 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.store;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FilterDirectory;
|
||||
import org.apache.lucene.store.RateLimitedFSDirectory;
|
||||
import org.apache.lucene.store.SimpleFSDirectory;
|
||||
import org.apache.lucene.store.SleepingLockWrapper;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexModule;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardPath;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.IndexSettingsModule;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
public class FsDirectoryServiceTests extends ESTestCase {
|
||||
|
||||
public void testHasSleepWrapperOnSharedFS() throws IOException {
|
||||
Settings build = randomBoolean() ?
|
||||
Settings.builder().put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true).build() :
|
||||
Settings.builder().put(IndexMetaData.SETTING_SHADOW_REPLICAS, true).build();
|
||||
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
|
||||
IndexStoreConfig config = new IndexStoreConfig(build);
|
||||
IndexStore store = new IndexStore(settings, config);
|
||||
Path tempDir = createTempDir().resolve("foo").resolve("0");
|
||||
Files.createDirectories(tempDir);
|
||||
ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0));
|
||||
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
|
||||
Directory directory = fsDirectoryService.newDirectory();
|
||||
assertTrue(directory instanceof RateLimitedFSDirectory);
|
||||
RateLimitedFSDirectory rateLimitingDirectory = (RateLimitedFSDirectory) directory;
|
||||
Directory delegate = rateLimitingDirectory.getDelegate();
|
||||
assertTrue(delegate.getClass().toString(), delegate instanceof SleepingLockWrapper);
|
||||
}
|
||||
|
||||
public void testHasNoSleepWrapperOnNormalFS() throws IOException {
|
||||
Settings build = Settings.builder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs").build();
|
||||
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
|
||||
IndexStoreConfig config = new IndexStoreConfig(build);
|
||||
IndexStore store = new IndexStore(settings, config);
|
||||
Path tempDir = createTempDir().resolve("foo").resolve("0");
|
||||
Files.createDirectories(tempDir);
|
||||
ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0));
|
||||
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
|
||||
Directory directory = fsDirectoryService.newDirectory();
|
||||
assertTrue(directory instanceof RateLimitedFSDirectory);
|
||||
RateLimitedFSDirectory rateLimitingDirectory = (RateLimitedFSDirectory) directory;
|
||||
Directory delegate = rateLimitingDirectory.getDelegate();
|
||||
assertFalse(delegate instanceof SleepingLockWrapper);
|
||||
assertTrue(delegate instanceof SimpleFSDirectory);
|
||||
}
|
||||
}
|
|
@ -1,124 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.store;
|
||||
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.zip.Adler32;
|
||||
|
||||
/**
|
||||
* Simple tests for LegacyVerification (old segments)
|
||||
* @deprecated remove this test when support for lucene 4.x
|
||||
* segments is not longer needed.
|
||||
*/
|
||||
@Deprecated
|
||||
public class LegacyVerificationTests extends ESTestCase {
|
||||
|
||||
public void testAdler32() throws Exception {
|
||||
Adler32 expected = new Adler32();
|
||||
byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
|
||||
expected.update(bytes);
|
||||
String expectedString = Store.digestToString(expected.getValue());
|
||||
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
|
||||
out.writeBytes(bytes, 0, bytes.length);
|
||||
out.verify();
|
||||
out.close();
|
||||
out.verify();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testAdler32Corrupt() throws Exception {
|
||||
Adler32 expected = new Adler32();
|
||||
byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
|
||||
expected.update(bytes);
|
||||
String expectedString = Store.digestToString(expected.getValue());
|
||||
|
||||
byte corruptBytes[] = "abcdefch".getBytes(StandardCharsets.UTF_8);
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
|
||||
out.writeBytes(corruptBytes, 0, bytes.length);
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException e) {
|
||||
// expected exception
|
||||
}
|
||||
out.close();
|
||||
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException e) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testLengthOnlyOneByte() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 1);
|
||||
out.writeByte((byte) 3);
|
||||
out.verify();
|
||||
out.close();
|
||||
out.verify();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testLengthOnlyCorrupt() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 2);
|
||||
out.writeByte((byte) 3);
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException expected) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
out.close();
|
||||
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException expected) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
@@ -355,95 +355,6 @@ public class StoreTests extends ESTestCase {
}
}

// IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@AwaitsFix(bugUrl="Fails with seed E1394B038144F6E")
// The test currently fails because the segment infos and the index don't
// agree on the oldest version of a segment. We should fix this test by
// switching to a static bw index
public void testWriteLegacyChecksums() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
// set default codec - all segments need checksums
final boolean usesOldCodec = randomBoolean();
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(usesOldCodec ? new OldSIMockingCodec() : TestUtil.getDefaultCodec()));
int docs = 1 + random().nextInt(100);

for (int i = 0; i < docs; i++) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
if (random().nextBoolean()) {
for (int i = 0; i < docs; i++) {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
}
if (random().nextBoolean()) {
DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
}
Store.MetadataSnapshot metadata;
// check before we committed
try {
store.getMetadata();
fail("no index present - expected exception");
} catch (IndexNotFoundException ex) {
// expected
}
assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed

writer.close();
Store.LegacyChecksums checksums = new Store.LegacyChecksums();
Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
for (String file : store.directory().listAll()) {
if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
continue;
}
BytesRef hash = new BytesRef();
if (file.startsWith("segments")) {
hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
}
StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
legacyMeta.put(file, storeFileMetaData);
checksums.add(storeFileMetaData);
}
checksums.write(store);

metadata = store.getMetadata();
Map<String, StoreFileMetaData> stringStoreFileMetaDataMap = metadata.asMap();
assertThat(legacyMeta.size(), equalTo(stringStoreFileMetaDataMap.size()));
if (usesOldCodec) {
for (StoreFileMetaData meta : legacyMeta.values()) {
assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
assertEquals(meta.name() + "checksum", meta.checksum());
assertTrue(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
}
} else {

// even if we have a legacy checksum - if we use a new codec we should reuse
for (StoreFileMetaData meta : legacyMeta.values()) {
assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
assertFalse(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
StoreFileMetaData storeFileMetaData = metadata.get(meta.name());
try (IndexInput input = store.openVerifyingInput(meta.name(), IOContext.DEFAULT, storeFileMetaData)) {
assertTrue(storeFileMetaData.toString(), input instanceof Store.VerifyingIndexInput);
input.seek(meta.length());
Store.verify(input);
}
}
}
assertDeleteContent(store, directoryService);
IOUtils.close(store);

}

public void testNewChecksums() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
@@ -489,7 +400,6 @@ public class StoreTests extends ESTestCase {
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
assertThat(meta.hasLegacyChecksum(), equalTo(false));
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
assertThat(meta.hash().length, greaterThan(0));
@@ -503,97 +413,6 @@ public class StoreTests extends ESTestCase {
IOUtils.close(store);
}

public void testMixedChecksums() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
// this time random codec....
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
int docs = 1 + random().nextInt(100);

for (int i = 0; i < docs; i++) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
if (random().nextBoolean()) {
for (int i = 0; i < docs; i++) {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
}
if (random().nextBoolean()) {
DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
}
Store.MetadataSnapshot metadata;
// check before we committed
try {
store.getMetadata();
fail("no index present - expected exception");
} catch (IndexNotFoundException ex) {
// expected
}
assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
writer.commit();
writer.close();
Store.LegacyChecksums checksums = new Store.LegacyChecksums();
metadata = store.getMetadata();
assertThat(metadata.asMap().isEmpty(), is(false));
for (StoreFileMetaData meta : metadata) {
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
if (meta.checksum() == null) {
String checksum = null;
try {
CodecUtil.retrieveChecksum(input);
fail("expected a corrupt index - posting format has not checksums");
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
checksumIndexInput.seek(meta.length());
checksum = Store.digestToString(checksumIndexInput.getChecksum());
}
// fine - it's a postings format without checksums
checksums.add(new StoreFileMetaData(meta.name(), meta.length(), checksum, null));
}
} else {
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
assertThat(meta.hasLegacyChecksum(), equalTo(false));
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
}
}
}
assertConsistent(store, metadata);
checksums.write(store);
metadata = store.getMetadata();
assertThat(metadata.asMap().isEmpty(), is(false));
for (StoreFileMetaData meta : metadata) {
assertThat("file: " + meta.name() + " has a null checksum", meta.checksum(), not(nullValue()));
if (meta.hasLegacyChecksum()) {
try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
checksumIndexInput.seek(meta.length());
assertThat(meta.checksum(), equalTo(Store.digestToString(checksumIndexInput.getChecksum())));
}
} else {
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
assertThat(meta.hasLegacyChecksum(), equalTo(false));
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
}
}
}
assertConsistent(store, metadata);
TestUtil.checkIndex(store.directory());
assertDeleteContent(store, directoryService);
IOUtils.close(store);
}

public void testRenameFile() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
@@ -654,18 +473,7 @@ public class StoreTests extends ESTestCase {
}

final Adler32 adler32 = new Adler32();
long legacyFileLength = 0;
try (IndexOutput output = dir.createOutput("legacy.bin", IOContext.DEFAULT)) {
int iters = scaledRandomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
adler32.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
legacyFileLength += bytesRef.length;
}
}
final long luceneChecksum;
final long adler32LegacyChecksum = adler32.getValue();
try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
assertEquals(luceneFileLength, indexInput.length());
luceneChecksum = CodecUtil.retrieveChecksum(indexInput);
@@ -673,38 +481,22 @@ public class StoreTests extends ESTestCase {

{ // positive check
StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertTrue(Store.checkIntegrityNoException(lucene, dir));
assertTrue(Store.checkIntegrityNoException(legacy, dir));
}

{ // negative check - wrong checksum
StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum + 1), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum + 1));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertFalse(Store.checkIntegrityNoException(lucene, dir));
assertFalse(Store.checkIntegrityNoException(legacy, dir));
}

{ // negative check - wrong length
StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength + 1, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength + 1, Store.digestToString(adler32LegacyChecksum));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertFalse(Store.checkIntegrityNoException(lucene, dir));
assertFalse(Store.checkIntegrityNoException(legacy, dir));
}

{ // negative check - wrong file
StoreFileMetaData lucene = new StoreFileMetaData("legacy.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("lucene_checksum.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertFalse(Store.checkIntegrityNoException(lucene, dir));
assertFalse(Store.checkIntegrityNoException(legacy, dir));
}
dir.close();
@@ -827,7 +619,7 @@ public class StoreTests extends ESTestCase {

public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
for (String file : store.directory().listAll()) {
if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && !Store.isChecksum(file) && file.startsWith("extra") == false) {
if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
} else {
assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
@@ -835,21 +627,6 @@ public class StoreTests extends ESTestCase {
}
}

/**
* Legacy indices without lucene CRC32 did never write or calculate checksums for segments_N files
* but for other files
*/
public void testRecoveryDiffWithLegacyCommit() {
Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
Store.MetadataSnapshot first = new Store.MetadataSnapshot(unmodifiableMap(new HashMap<>(metaDataMap)), emptyMap(), 0);

Store.MetadataSnapshot second = new Store.MetadataSnapshot(unmodifiableMap(new HashMap<>(metaDataMap)), emptyMap(), 0);
Store.RecoveryDiff recoveryDiff = first.recoveryDiff(second);
assertEquals(recoveryDiff.toString(), recoveryDiff.different.size(), 2);
}

public void testRecoveryDiff() throws IOException, InterruptedException {
int numDocs = 2 + random().nextInt(100);
List<Document> docs = new ArrayList<>();
@@ -1043,21 +820,6 @@ public class StoreTests extends ESTestCase {

Store.MetadataSnapshot secondMeta = store.getMetadata();

Store.LegacyChecksums checksums = new Store.LegacyChecksums();
Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
for (String file : store.directory().listAll()) {
if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
continue;
}
BytesRef hash = new BytesRef();
if (file.startsWith("segments")) {
hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
}
StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
legacyMeta.put(file, storeFileMetaData);
checksums.add(storeFileMetaData);
}
checksums.write(store); // write one checksum file here - we expect it to survive all the cleanups

if (randomBoolean()) {
store.cleanupAndVerify("test", firstMeta);
@@ -1068,16 +830,13 @@ public class StoreTests extends ESTestCase {
if (file.startsWith("extra")) {
continue;
}
assertTrue(firstMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
if (Store.isChecksum(file)) {
numChecksums++;
} else if (secondMeta.contains(file) == false) {
assertTrue(firstMeta.contains(file) || file.equals("write.lock"));
if (secondMeta.contains(file) == false) {
numNotFound++;
}

}
assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
} else {
store.cleanupAndVerify("test", secondMeta);
String[] strings = store.directory().listAll();
@@ -1087,16 +846,13 @@ public class StoreTests extends ESTestCase {
if (file.startsWith("extra")) {
continue;
}
assertTrue(file, secondMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
if (Store.isChecksum(file)) {
numChecksums++;
} else if (firstMeta.contains(file) == false) {
assertTrue(file, secondMeta.contains(file) || file.equals("write.lock"));
if (firstMeta.contains(file) == false) {
numNotFound++;
}

}
assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
}

deleteContent(store.directory());
@@ -1105,8 +861,8 @@ public class StoreTests extends ESTestCase {

public void testCleanUpWithLegacyChecksums() throws IOException {
Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, "foobar", Version.LUCENE_4_8_0, new BytesRef(new byte[]{1})));
metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", Version.LUCENE_4_8_0, new BytesRef()));
Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(unmodifiableMap(metaDataMap), emptyMap(), 0);

final ShardId shardId = new ShardId("index", "_na_", 1);
@@ -1232,8 +988,8 @@ public class StoreTests extends ESTestCase {
}

protected Store.MetadataSnapshot createMetaDataSnapshot() {
StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1);
StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1);
StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666");
StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666");
Map<String, StoreFileMetaData> storeFileMetaDataMap = new HashMap<>();
storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1);
storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2);
@@ -117,7 +117,7 @@ public class PreBuiltAnalyzerIntegrationIT extends ESIntegTestCase {

/**
* Test case for #5030: Upgrading analysis plugins fails
* See https://github.com/elasticsearch/elasticsearch/issues/5030
* See https://github.com/elastic/elasticsearch/issues/5030
*/
public void testThatPluginAnalyzersCanBeUpdated() throws Exception {
final XContentBuilder mapping = jsonBuilder().startObject()
@@ -44,8 +44,8 @@ public class ConcurrentDynamicTemplateIT extends ESIntegTestCase {
final String fieldName = "field";
final String mapping = "{ \"" + mappingType + "\": {" +
"\"dynamic_templates\": ["
+ "{ \"" + fieldName + "\": {" + "\"path_match\": \"*\"," + "\"mapping\": {" + "\"type\": \"string\"," + "\"store\": true,"
+ "\"index\": \"analyzed\", \"analyzer\": \"whitespace\" } } } ] } }";
+ "{ \"" + fieldName + "\": {" + "\"path_match\": \"*\"," + "\"mapping\": {" + "\"type\": \"text\"," + "\"store\": true,"
+ "\"analyzer\": \"whitespace\" } } } ] } }";
// The 'fieldNames' array is used to help with retrieval of index terms
// after testing
@@ -100,7 +100,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
settingsBuilder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0)
).addMapping("doc", "{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
).addMapping("doc", "{\"doc\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}")
.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();

@@ -112,7 +112,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {

GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
equalTo("{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"},\"date\":{\"type\":\"integer\"}}}}"));
equalTo("{\"doc\":{\"properties\":{\"body\":{\"type\":\"text\"},\"date\":{\"type\":\"integer\"}}}}"));
}

public void testUpdateMappingWithoutTypeMultiObjects() throws Exception {
@@ -141,7 +141,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 0)
).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}")
.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();

@@ -150,17 +150,17 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
.setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}").execute().actionGet();
fail("Expected MergeMappingException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("mapper [body] of different type, current_type [string], merged_type [integer]"));
assertThat(e.getMessage(), containsString("mapper [body] of different type, current_type [text], merged_type [integer]"));
}
}

public void testUpdateMappingWithNormsConflicts() throws Exception {
client().admin().indices().prepareCreate("test")
.addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": false }}}}}")
.addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": { \"enabled\": false }}}}}")
.execute().actionGet();
try {
client().admin().indices().preparePutMapping("test").setType("type")
.setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": true }}}}}").execute()
.setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": { \"enabled\": true }}}}}").execute()
.actionGet();
fail("Expected MergeMappingException");
} catch (IllegalArgumentException e) {
@@ -169,7 +169,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
}

/*
Second regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
Second regression test for https://github.com/elastic/elasticsearch/issues/3381
*/
public void testUpdateMappingNoChanges() throws Exception {
client().admin().indices().prepareCreate("test")
@@ -177,12 +177,12 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 0)
).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}")
.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();

PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
.setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
.setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}")
.execute().actionGet();

//no changes, we return
@@ -148,7 +148,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {

// Create an index where the mappings have a field data filter
assertAcked(prepareCreate("ramtest").setSource("{\"mappings\": {\"type\": {\"properties\": {\"test\": " +
"{\"type\": \"string\",\"fielddata\": {\"filter\": {\"regex\": {\"pattern\": \"^value.*\"}}}}}}}}"));
"{\"type\": \"text\",\"fielddata\": {\"filter\": {\"regex\": {\"pattern\": \"^value.*\"}}}}}}}}"));

ensureGreen("ramtest");
@@ -18,6 +18,8 @@
*/
package org.elasticsearch.indices.recovery;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -25,6 +27,7 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.test.ESSingleNodeTestCase;

@@ -35,7 +38,7 @@ import java.util.regex.Pattern;
/**
*/
public class RecoveryStatusTests extends ESSingleNodeTestCase {

public void testRenameTempFiles() throws IOException {
IndexService service = createIndex("foo");

@@ -50,14 +53,16 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase {
public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
}
});
try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store())) {
try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store())) {
indexOutput.writeInt(1);
IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar");
assertSame(openIndexOutput, indexOutput);
openIndexOutput.writeInt(1);
CodecUtil.writeFooter(indexOutput);
}

try {
status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store());
status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store());
fail("file foo.bar is already opened and registered");
} catch (IllegalStateException ex) {
assertEquals("output for file [foo.bar] has already been created", ex.getMessage());
@@ -749,7 +749,7 @@ public class IndexStatsIT extends ESIntegTestCase {
assertAcked(prepareCreate("test1")
.addMapping(
"bar",
"{ \"properties\": { \"bar\": { \"type\": \"string\", \"fields\": { \"completion\": { \"type\": \"completion\" }}},\"baz\": { \"type\": \"string\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}}}"));
"{ \"properties\": { \"bar\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}},\"baz\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}}}"));
ensureGreen();

client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}").execute().actionGet();
@@ -345,7 +345,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {

client().admin().indices().preparePutTemplate("template_with_aliases")
.setTemplate("te*")
.addMapping("type1", "{\"type1\" : {\"properties\" : {\"value\" : {\"type\" : \"string\"}}}}")
.addMapping("type1", "{\"type1\" : {\"properties\" : {\"value\" : {\"type\" : \"text\"}}}}")
.addAlias(new Alias("simple_alias"))
.addAlias(new Alias("templated_alias-{index}"))
.addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}"))
@@ -38,13 +38,8 @@ import static org.hamcrest.Matchers.notNullValue;
/**
*
*/
@ClusterScope(scope= Scope.TEST, numDataNodes =0)
@ClusterScope(scope= Scope.TEST, numDataNodes = 0)
public class SimpleNodesInfoIT extends ESIntegTestCase {
static final class Fields {
static final String SITE_PLUGIN = "dummy";
static final String SITE_PLUGIN_DESCRIPTION = "This is a description for a dummy test site plugin.";
static final String SITE_PLUGIN_VERSION = "0.0.7-BOND-SITE";
}

public void testNodesInfos() throws Exception {
List<String> nodesIds = internalCluster().startNodesAsync(2).get();
@@ -1683,10 +1683,10 @@ public class PercolatorIT extends ESIntegTestCase {
String mapping = "{\n" +
" \"doc\": {\n" +
" \"properties\": {\n" +
" \"name\": {\"type\":\"string\"},\n" +
" \"name\": {\"type\":\"text\"},\n" +
" \"persons\": {\n" +
" \"type\": \"nested\"\n," +
" \"properties\" : {\"foo\" : {\"type\" : \"string\"}}" +
" \"properties\" : {\"foo\" : {\"type\" : \"text\"}}" +
" }\n" +
" }\n" +
" }\n" +
@@ -249,7 +249,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
}

/*
See https://github.com/elasticsearch/elasticsearch/issues/2682
See https://github.com/elastic/elasticsearch/issues/2682
Searching on more than one index, if one of those is an alias with configured routing, the shards that belonged
to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShards.
That affected the number of shards that we executed the search on, thus some documents were missing in the search results.
@@ -273,7 +273,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
}

/*
See https://github.com/elasticsearch/elasticsearch/pull/3268
See https://github.com/elastic/elasticsearch/pull/3268
Searching on more than one index, if one of those is an alias with configured routing, the shards that belonged
to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShardsCount.
That could cause returning 1, which led to forcing the QUERY_AND_FETCH mode.
@@ -61,12 +61,12 @@ public class DiversifiedSamplerIT extends ESIntegTestCase {
@Override
public void setupSuiteScopeCluster() throws Exception {
assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS, SETTING_NUMBER_OF_REPLICAS, 0).addMapping(
"book", "author", "type=string,index=not_analyzed", "name", "type=string,index=analyzed", "genre",
"type=string,index=not_analyzed", "price", "type=float"));
"book", "author", "type=keyword", "name", "type=keyword", "genre",
"type=keyword", "price", "type=float"));
createIndex("idx_unmapped");
// idx_unmapped_author is same as main index but missing author field
assertAcked(prepareCreate("idx_unmapped_author").setSettings(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS, SETTING_NUMBER_OF_REPLICAS, 0)
.addMapping("book", "name", "type=string,index=analyzed", "genre", "type=string,index=not_analyzed", "price",
.addMapping("book", "name", "type=keyword", "genre", "type=keyword", "price",
"type=float"));

ensureGreen();
@@ -106,7 +106,7 @@ public class FilterIT extends ESIntegTestCase {
}

// See NullPointer issue when filters are empty:
// https://github.com/elasticsearch/elasticsearch/issues/8438
// https://github.com/elastic/elasticsearch/issues/8438
public void testEmptyFilterDeclarations() throws Exception {
QueryBuilder emptyFilter = new BoolQueryBuilder();
SearchResponse response = client().prepareSearch("idx").addAggregation(filter("tag1", emptyFilter)).execute().actionGet();
@@ -132,7 +132,7 @@ public class FiltersIT extends ESIntegTestCase {
}

// See NullPointer issue when filters are empty:
// https://github.com/elasticsearch/elasticsearch/issues/8438
// https://github.com/elastic/elasticsearch/issues/8438
public void testEmptyFilterDeclarations() throws Exception {
QueryBuilder<?> emptyFilter = new BoolQueryBuilder();
SearchResponse response = client().prepareSearch("idx")
@@ -360,7 +360,7 @@ public class NestedIT extends ESIntegTestCase {
}
}

// Test based on: https://github.com/elasticsearch/elasticsearch/issues/9280
// Test based on: https://github.com/elastic/elasticsearch/issues/9280
public void testParentFilterResolvedCorrectly() throws Exception {
XContentBuilder mapping = jsonBuilder().startObject().startObject("provider").startObject("properties")
.startObject("comments")
@@ -94,7 +94,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
}

public void testPlugin() throws Exception {
String type = randomBoolean() ? "string" : "long";
String type = randomBoolean() ? "text" : "long";
String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
SharedSignificantTermsTestMethods.index01Docs(type, settings, this);
SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
@@ -257,7 +257,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
}

public void testXContentResponse() throws Exception {
String type = randomBoolean() ? "string" : "long";
String type = randomBoolean() ? "text" : "long";
String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
SharedSignificantTermsTestMethods.index01Docs(type, settings, this);
SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
@@ -309,7 +309,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
indexRandom(true, false, indexRequestBuilderList);

// Now create some holes in the index with selective deletes caused by updates.
// This is the scenario that caused this issue https://github.com/elasticsearch/elasticsearch/issues/7951
// This is the scenario that caused this issue https://github.com/elastic/elasticsearch/issues/7951
// Scoring algorithms throw exceptions if term docFreqs exceed the reported size of the index
// from which they are taken so need to make sure this doesn't happen.
String[] text = cat1v1;
@@ -333,7 +333,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
}

public void testBackgroundVsSeparateSet() throws Exception {
String type = randomBoolean() ? "string" : "long";
String type = randomBoolean() ? "text" : "long";
String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
SharedSignificantTermsTestMethods.index01Docs(type, settings, this);
testBackgroundVsSeparateSet(new MutualInformation(true, true), new MutualInformation(true, false));
@@ -460,7 +460,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
}

public void testScriptScore() throws ExecutionException, InterruptedException, IOException {
indexRandomFrequencies01(randomBoolean() ? "string" : "long");
indexRandomFrequencies01(randomBoolean() ? "text" : "long");
ScriptHeuristic scriptHeuristic = getScriptSignificanceHeuristic();
ensureYellow();
SearchResponse response = client().prepareSearch(INDEX_NAME)
@@ -49,9 +49,9 @@ public class TermsShardMinDocCountIT extends ESIntegTestCase {
return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString();
}

// see https://github.com/elasticsearch/elasticsearch/issues/5998
// see https://github.com/elastic/elasticsearch/issues/5998
public void testShardMinDocCountSignificantTermsTest() throws Exception {
String termtype = "string";
String termtype = "text";
if (randomBoolean()) {
termtype = "long";
}
@@ -107,9 +107,9 @@ public class TermsShardMinDocCountIT extends ESIntegTestCase {
}
}

// see https://github.com/elasticsearch/elasticsearch/issues/5998
// see https://github.com/elastic/elasticsearch/issues/5998
public void testShardMinDocCountTermsTest() throws Exception {
final String [] termTypes = {"string", "long", "integer", "float", "double"};
final String [] termTypes = {"text", "long", "integer", "float", "double"};
String termtype = termTypes[randomInt(termTypes.length - 1)];

assertAcked(prepareCreate(index).setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0).addMapping(type, "{\"properties\":{\"text\": {\"type\": \"" + termtype + "\"}}}"));
Some files were not shown because too many files have changed in this diff.