Merge branch 'master' into feature/ingest
This commit is contained in:
commit 6c43137413
@@ -80,3 +80,13 @@ eclipse {
defaultOutputDir = new File(file('build'), 'eclipse')
}
}

task copyEclipseSettings(type: Copy) {
from project.file('src/main/resources/eclipse.settings')
into '.settings'
}
// otherwise .settings is not nuked entirely
tasks.cleanEclipse {
delete '.settings'
}
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
@@ -69,7 +69,7 @@ import java.util.regex.Pattern
* </pre>
*/
public class DependencyLicensesTask extends DefaultTask {
private static final String SHA_EXTENSION = '.sha1'
static final String SHA_EXTENSION = '.sha1'

// TODO: we should be able to default this to eg compile deps, but we need to move the licenses
// check from distribution to core (ie this should only be run on java projects)
@@ -106,16 +106,15 @@ public class DependencyLicensesTask extends DefaultTask {

@TaskAction
public void checkDependencies() {
// TODO REMOVE THIS DIRTY FIX FOR #15168
if (licensesDir.exists() == false) {
return
}
if (licensesDir.exists() == false && dependencies.isEmpty() == false) {
if (dependencies.isEmpty()) {
if (licensesDir.exists()) {
throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
}
return // no dependencies to check
} else if (licensesDir.exists() == false) {
throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies")
}
if (licensesDir.exists() && dependencies.isEmpty()) {
throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
}

// order is the same for keys and values iteration since we use a linked hashmap
List<String> mapped = new ArrayList<>(mappings.values())
@@ -68,9 +68,7 @@ class PrecommitTasks {
if (mainForbidden != null) {
mainForbidden.configure {
bundledSignatures += 'jdk-system-out'
signaturesURLs += [
getClass().getResource('/forbidden/core-signatures.txt'),
getClass().getResource('/forbidden/third-party-signatures.txt')]
signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
}
}
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
@@ -35,6 +35,7 @@ public class UpdateShasTask extends DefaultTask {

public UpdateShasTask() {
description = 'Updates the sha files for the dependencyLicenses check'
onlyIf { parentTask.licensesDir.exists() }
}

@TaskAction
@@ -42,13 +43,13 @@ public class UpdateShasTask extends DefaultTask {
Set<File> shaFiles = new HashSet<File>()
parentTask.licensesDir.eachFile {
String name = it.getName()
if (name.endsWith(SHA_EXTENSION)) {
if (name.endsWith(DependencyLicensesTask.SHA_EXTENSION)) {
shaFiles.add(it)
}
}
for (File dependency : parentTask.dependencies) {
String jarName = dependency.getName()
File shaFile = new File(parentTask.licensesDir, jarName + SHA_EXTENSION)
File shaFile = new File(parentTask.licensesDir, jarName + DependencyLicensesTask.SHA_EXTENSION)
if (shaFile.exists() == false) {
logger.lifecycle("Adding sha for ${jarName}")
String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()
@@ -112,3 +112,7 @@ java.lang.System#setProperty(java.lang.String,java.lang.String)
java.lang.System#clearProperty(java.lang.String)
java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view

@defaultMessage Avoid unchecked warnings by using Collections#empty(List|Map|Set) methods
java.util.Collections#EMPTY_LIST
java.util.Collections#EMPTY_MAP
java.util.Collections#EMPTY_SET

@@ -90,3 +90,12 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
org.joda.time.DateTime#<init>(int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
org.joda.time.DateTime#now()
org.joda.time.DateTimeZone#getDefault()
@@ -1,66 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder.
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.impl.UnsafeChunkDecoder#<init>()
com.ning.compress.lzf.parallel.CompressTask
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance()
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkDecoderFactory#optimalInstance()
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.File)
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.FileDescriptor)
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.lang.String)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File, boolean)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.FileDescriptor)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String, boolean)
com.ning.compress.lzf.LZFEncoder#encode(byte[])
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int)
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int)
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFCompressingInputStream#<init>(java.io.InputStream)
com.ning.compress.lzf.LZFDecoder#fastDecoder()
com.ning.compress.lzf.LZFDecoder#decode(byte[])
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int)
com.ning.compress.lzf.LZFDecoder#decode(byte[], byte[])
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int, byte[])
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, boolean)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler, boolean)
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream)
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler)
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler, com.ning.compress.BufferRecycler)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
org.joda.time.DateTime#<init>(int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
org.joda.time.DateTime#now()
org.joda.time.DateTimeZone#getDefault()
@@ -62,12 +62,9 @@ dependencies {
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
compile "org.yaml:snakeyaml:1.15" // used by jackson yaml

// network stack
compile 'io.netty:netty:3.10.5.Final'
// compression of transport protocol
compile 'com.ning:compress-lzf:1.0.2'
// percentiles aggregation
compile 'com.tdunning:t-digest:3.0'
// precentil ranks aggregation
@@ -554,7 +554,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
MERGE_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.MergeMappingException.class, org.elasticsearch.index.mapper.MergeMappingException::new, 87),
// 87 used to be for MergeMappingException
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
@@ -300,7 +300,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
String parent = null;
String[] fields = defaultFields;
String timestamp = null;
Long ttl = null;
TimeValue ttl = null;
String opType = null;
long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL;
@@ -333,9 +333,9 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
timestamp = parser.text();
} else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName).millis();
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName);
} else {
ttl = parser.longValue();
ttl = new TimeValue(parser.longValue());
}
} else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
opType = parser.text();
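With this change the bulk action metadata accepts _ttl either as a time-value string or as a raw number of milliseconds, and both forms are normalized to a TimeValue. A hedged example of a bulk metadata line that would exercise the string branch (the index, type, and id values are invented for illustration):

    { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1", "_ttl" : "5m" } }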
@@ -335,7 +335,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index());
}

return executeIndexRequestOnPrimary(request, indexRequest, indexShard);
return executeIndexRequestOnPrimary(indexRequest, indexShard);
}

private WriteResult<DeleteResponse> shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) {
@@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.VersionType;
@@ -136,7 +137,8 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
private String parent;
@Nullable
private String timestamp;
private long ttl = -1;
@Nullable
private TimeValue ttl;

private BytesReference source;

@@ -229,6 +231,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
}

if (ttl != null) {
if (ttl.millis() < 0) {
validationException = addValidationError("ttl must not be negative", validationException);
}
}
return validationException;
}
@@ -324,22 +332,33 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
}

/**
* Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise. Setting it
* to <tt>null</tt> will reset to have no ttl.
* Sets the ttl value as a time value expression.
*/
public IndexRequest ttl(Long ttl) throws ElasticsearchGenerationException {
if (ttl == null) {
this.ttl = -1;
return this;
}
if (ttl <= 0) {
throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
}
public IndexRequest ttl(String ttl) {
this.ttl = TimeValue.parseTimeValue(ttl, null, "ttl");
return this;
}

/**
* Sets the ttl as a {@link TimeValue} instance.
*/
public IndexRequest ttl(TimeValue ttl) {
this.ttl = ttl;
return this;
}

public long ttl() {
/**
* Sets the relative ttl value in milliseconds. It musts be greater than 0 as it makes little sense otherwise.
*/
public IndexRequest ttl(long ttl) {
this.ttl = new TimeValue(ttl);
return this;
}

/**
* Returns the ttl as a {@link TimeValue}
*/
public TimeValue ttl() {
return this.ttl;
}
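For readers following the TTL change above: the request now stores a nullable TimeValue instead of a primitive long and exposes three setters. A minimal usage sketch under that assumption (the index, type, id, and source values are invented for the example; the methods are the ones added in this hunk):

    IndexRequest request = new IndexRequest("my-index", "my-type", "1")
            .source("{\"field\":\"value\"}");
    request.ttl("5m");                            // parsed with TimeValue.parseTimeValue
    request.ttl(TimeValue.timeValueMinutes(5));   // explicit TimeValue
    request.ttl(300000L);                         // milliseconds, wrapped in new TimeValue(ttl)
    TimeValue ttl = request.ttl();                // may be null when no ttl was set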
@@ -665,7 +684,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
routing = in.readOptionalString();
parent = in.readOptionalString();
timestamp = in.readOptionalString();
ttl = in.readLong();
ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null;
source = in.readBytesReference();

opType = OpType.fromId(in.readByte());
@@ -682,7 +701,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
out.writeOptionalString(routing);
out.writeOptionalString(parent);
out.writeOptionalString(timestamp);
out.writeLong(ttl);
if (ttl == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
ttl.writeTo(out);
}
out.writeBytesReference(source);
out.writeByte(opType.id());
out.writeBoolean(refresh);
@@ -23,6 +23,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@@ -254,9 +255,27 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}

// Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise.
/**
* Sets the ttl value as a time value expression.
*/
public IndexRequestBuilder setTTL(String ttl) {
request.ttl(ttl);
return this;
}

/**
* Sets the relative ttl value in milliseconds. It musts be greater than 0 as it makes little sense otherwise.
*/
public IndexRequestBuilder setTTL(long ttl) {
request.ttl(ttl);
return this;
}

/**
* Sets the ttl as a {@link TimeValue} instance.
*/
public IndexRequestBuilder setTTL(TimeValue ttl) {
request.ttl(ttl);
return this;
}
}
@@ -166,7 +166,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());

final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(request, indexShard);

final IndexResponse response = result.response;
final Translog.Location location = result.location;
@@ -223,7 +223,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
throw requestBlockException;
}

logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
if (logger.isTraceEnabled()) {
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
}
ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
nodeIds = new HashMap<>();

@@ -300,7 +302,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
}

protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
if (logger.isTraceEnabled()) {
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
}

// this is defensive to protect against the possibility of double invocation
// the current implementation of TransportService#sendRequest guards against this
@@ -351,7 +355,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception {
List<ShardRouting> shards = request.getShards();
final int totalShards = shards.size();
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
if (logger.isTraceEnabled()) {
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
}
final Object[] shardResultOrExceptions = new Object[totalShards];

int shardIndex = -1;
@@ -375,10 +381,14 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe

private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) {
try {
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
if (logger.isTraceEnabled()) {
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
}
ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
shardResults[shardIndex] = result;
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
if (logger.isTraceEnabled()) {
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
}
} catch (Throwable t) {
BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
e.setIndex(shardRouting.getIndex());
@@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionWriteResponse;
import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequest.OpType;
import org.elasticsearch.action.index.IndexResponse;
@@ -1074,23 +1073,22 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

/** Utility method to create either an index or a create operation depending
* on the {@link OpType} of the request. */
private final Engine.Index prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
private Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);

}

/** Execute the given {@link IndexRequest} on a primary shard, throwing a
* {@link RetryOnPrimaryException} if the operation needs to be re-tried. */
protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard) throws Throwable {
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId();
if (update != null) {
final String indexName = shardId.getIndex();
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
operation = prepareIndexOperationOnPrimary(request, indexShard);
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
throw new RetryOnPrimaryException(shardId,
@@ -88,7 +88,7 @@ public class UpdateHelper extends AbstractComponent {
throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
}
IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
Long ttl = indexRequest.ttl();
TimeValue ttl = indexRequest.ttl();
if (request.scriptedUpsert() && request.script() != null) {
// Run the script to perform the create logic
IndexRequest upsert = request.upsertRequest();
@@ -99,7 +99,7 @@ public class UpdateHelper extends AbstractComponent {
ctx.put("_source", upsertDoc);
ctx = executeScript(request, ctx);
//Allow the script to set TTL using ctx._ttl
if (ttl < 0) {
if (ttl == null) {
ttl = getTTLFromScriptContext(ctx);
}

@@ -124,7 +124,7 @@ public class UpdateHelper extends AbstractComponent {
indexRequest.index(request.index()).type(request.type()).id(request.id())
// it has to be a "create!"
.create(true)
.ttl(ttl == null || ttl < 0 ? null : ttl)
.ttl(ttl)
.refresh(request.refresh())
.routing(request.routing())
.parent(request.parent())
@@ -151,7 +151,7 @@ public class UpdateHelper extends AbstractComponent {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
String operation = null;
String timestamp = null;
Long ttl = null;
TimeValue ttl = null;
final Map<String, Object> updatedSourceAsMap;
final XContentType updateSourceContentType = sourceAndContent.v1();
String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
@@ -160,7 +160,7 @@ public class UpdateHelper extends AbstractComponent {
if (request.script() == null && request.doc() != null) {
IndexRequest indexRequest = request.doc();
updatedSourceAsMap = sourceAndContent.v2();
if (indexRequest.ttl() > 0) {
if (indexRequest.ttl() != null) {
ttl = indexRequest.ttl();
}
timestamp = indexRequest.timestamp();
@@ -211,9 +211,9 @@ public class UpdateHelper extends AbstractComponent {
// apply script to update the source
// No TTL has been given in the update script so we keep previous TTL value if there is one
if (ttl == null) {
ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
if (ttl != null) {
ttl = ttl - TimeValue.nsecToMSec(System.nanoTime() - getDateNS); // It is an approximation of exact TTL value, could be improved
Long ttlAsLong = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
if (ttlAsLong != null) {
ttl = new TimeValue(ttlAsLong - TimeValue.nsecToMSec(System.nanoTime() - getDateNS));// It is an approximation of exact TTL value, could be improved
}
}

@@ -256,17 +256,15 @@ public class UpdateHelper extends AbstractComponent {
return ctx;
}

private Long getTTLFromScriptContext(Map<String, Object> ctx) {
Long ttl = null;
private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
Object fetchedTTL = ctx.get("_ttl");
if (fetchedTTL != null) {
if (fetchedTTL instanceof Number) {
ttl = ((Number) fetchedTTL).longValue();
} else {
ttl = TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl").millis();
return new TimeValue(((Number) fetchedTTL).longValue());
}
return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
}
return ttl;
return null;
}

/**
@@ -337,13 +335,10 @@ public class UpdateHelper extends AbstractComponent {
}
}

public static enum Operation {

public enum Operation {
UPSERT,
INDEX,
DELETE,
NONE

}

}
@@ -24,6 +24,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@@ -325,7 +326,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
}

/**
* Set the new ttl of the document. Note that if detectNoop is true (the default)
* Set the new ttl of the document as a long. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take
* effect.
*/
@@ -333,4 +334,24 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
request.doc().ttl(ttl);
return this;
}

/**
* Set the new ttl of the document as a time value expression. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take
* effect.
*/
public UpdateRequestBuilder setTtl(String ttl) {
request.doc().ttl(ttl);
return this;
}

/**
* Set the new ttl of the document as a {@link TimeValue} instance. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take
* effect.
*/
public UpdateRequestBuilder setTtl(TimeValue ttl) {
request.doc().ttl(ttl);
return this;
}
}
@@ -19,9 +19,9 @@

package org.elasticsearch.cluster;

import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -469,6 +469,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endArray();

builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);
}
builder.endArray();
}
builder.endObject();

builder.endObject();
}
builder.endObject();
@@ -584,6 +594,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {

public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable();
this.metaData = routingResult.metaData();
return this;
}

@@ -759,7 +770,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
nodes = after.nodes.diff(before.nodes);
metaData = after.metaData.diff(before.metaData);
blocks = after.blocks.diff(before.blocks);
customs = DiffableUtils.diff(before.customs, after.customs);
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
}

public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
@@ -771,14 +782,15 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
nodes = proto.nodes.readDiffFrom(in);
metaData = proto.metaData.readDiffFrom(in);
blocks = proto.blocks.readDiffFrom(in);
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom readFrom(StreamInput in, String key) throws IOException {
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}

@Override
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
@@ -29,7 +29,7 @@ import java.io.IOException;
public interface Diff<T> {

/**
* Applies difference to the specified part and retunrs the resulted part
* Applies difference to the specified part and returns the resulted part
*/
T apply(T part);

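The Diff contract above is intentionally small: a diff computed from two versions of T can later be applied to the old version to reproduce the new one. A hedged sketch of the round trip, using ClusterState (which the hunks above show implements Diffable<ClusterState>); the variable names are invented for illustration:

    Diff<ClusterState> diff = newState.diff(previousState);   // computed where the new state is known
    ClusterState reconstructed = diff.apply(previousState);   // applied to the older copy
    assert reconstructed.version() == newState.version();     // same state as the full new version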
@ -19,263 +19,630 @@
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntCursor;
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public final class DiffableUtils {
|
||||
private DiffableUtils() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for String keys
|
||||
*/
|
||||
public static KeySerializer<String> getStringKeySerializer() {
|
||||
return StringKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for Integer keys. Encodes as Int.
|
||||
*/
|
||||
public static KeySerializer<Integer> getIntKeySerializer() {
|
||||
return IntKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for Integer keys. Encodes as VInt.
|
||||
*/
|
||||
public static KeySerializer<Integer> getVIntKeySerializer() {
|
||||
return VIntKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> diff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after);
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
|
||||
*/
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two Maps of Diffable objects.
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> diff(Map<String, T> before, Map<String, T> after) {
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after);
|
||||
return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two Maps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keyedReader);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps.
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
|
||||
return new JdkMapDiff<>(in, keyedReader);
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto));
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps.
|
||||
* Loads an object that represents difference between two Maps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, new PrototypeReader<>(proto));
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's
|
||||
* used in custom metadata deserialization.
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public interface KeyedReader<T> {
|
||||
|
||||
/**
|
||||
* reads an object of the type T from the stream input
|
||||
*/
|
||||
T readFrom(StreamInput in, String key) throws IOException;
|
||||
|
||||
/**
|
||||
* reads an object that respresents differences between two objects with the type T from the stream input
|
||||
*/
|
||||
Diff<T> readDiffFrom(StreamInput in, String key) throws IOException;
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation of the KeyedReader that is using a prototype object for reading operations
|
||||
*
|
||||
* Note: this implementation is ignoring the key.
|
||||
* Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static class PrototypeReader<T extends Diffable<T>> implements KeyedReader<T> {
|
||||
private T proto;
|
||||
|
||||
public PrototypeReader(T proto) {
|
||||
this.proto = proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T readFrom(StreamInput in, String key) throws IOException {
|
||||
return proto.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<T> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
return proto.readDiffFrom(in);
|
||||
}
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two Maps of Diffable objects.
|
||||
* Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two Maps of (possibly diffable) objects.
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
*/
|
||||
private static class JdkMapDiff<T extends Diffable<T>> extends MapDiff<T, Map<String, T>> {
|
||||
private static class JdkMapDiff<K, T> extends MapDiff<K, T, Map<K, T>> {
|
||||
|
||||
protected JdkMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
super(in, reader);
|
||||
protected JdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
public JdkMapDiff(Map<String, T> before, Map<String, T> after) {
|
||||
public JdkMapDiff(Map<K, T> before, Map<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
for (String key : before.keySet()) {
|
||||
|
||||
for (K key : before.keySet()) {
|
||||
if (!after.containsKey(key)) {
|
||||
deletes.add(key);
|
||||
}
|
||||
}
|
||||
for (Map.Entry<String, T> partIter : after.entrySet()) {
|
||||
|
||||
for (Map.Entry<K, T> partIter : after.entrySet()) {
|
||||
T beforePart = before.get(partIter.getKey());
|
||||
if (beforePart == null) {
|
||||
adds.put(partIter.getKey(), partIter.getValue());
|
||||
upserts.put(partIter.getKey(), partIter.getValue());
|
||||
} else if (partIter.getValue().equals(beforePart) == false) {
|
||||
diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart));
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.getKey(), valueSerializer.diff(partIter.getValue(), beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.getKey(), partIter.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, T> apply(Map<String, T> map) {
|
||||
Map<String, T> builder = new HashMap<>();
|
||||
public Map<K, T> apply(Map<K, T> map) {
|
||||
Map<K, T> builder = new HashMap<>();
|
||||
builder.putAll(map);
|
||||
|
||||
for (String part : deletes) {
|
||||
for (K part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
|
||||
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<String, T> additon : adds.entrySet()) {
|
||||
builder.put(additon.getKey(), additon.getValue());
|
||||
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two ImmutableOpenMap of diffable objects
|
||||
* Represents differences between two ImmutableOpenMap of (possibly diffable) objects
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static class ImmutableOpenMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableOpenMap<String, T>> {
|
||||
private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
|
||||
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
super(in, reader);
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
for (ObjectCursor<String> key : before.keys()) {
|
||||
|
||||
for (ObjectCursor<K> key : before.keys()) {
|
||||
if (!after.containsKey(key.value)) {
|
||||
deletes.add(key.value);
|
||||
}
|
||||
}
|
||||
for (ObjectObjectCursor<String, T> partIter : after) {
|
||||
|
||||
for (ObjectObjectCursor<K, T> partIter : after) {
|
||||
T beforePart = before.get(partIter.key);
|
||||
if (beforePart == null) {
|
||||
adds.put(partIter.key, partIter.value);
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
} else if (partIter.value.equals(beforePart) == false) {
|
||||
diffs.put(partIter.key, partIter.value.diff(beforePart));
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenMap<String, T> apply(ImmutableOpenMap<String, T> map) {
|
||||
ImmutableOpenMap.Builder<String, T> builder = ImmutableOpenMap.builder();
|
||||
public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
|
||||
ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
|
||||
builder.putAll(map);
|
||||
|
||||
for (String part : deletes) {
|
||||
for (K part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
|
||||
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<String, T> additon : adds.entrySet()) {
|
||||
builder.put(additon.getKey(), additon.getValue());
|
||||
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two maps of diffable objects
|
||||
* Represents differences between two ImmutableOpenIntMap of (possibly diffable) objects
|
||||
*
|
||||
* This class is used as base class for different map implementations
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static abstract class MapDiff<T extends Diffable<T>, M> implements Diff<M> {
|
||||
private static class ImmutableOpenIntMapDiff<T> extends MapDiff<Integer, T, ImmutableOpenIntMap<T>> {
|
||||
|
||||
protected final List<String> deletes;
|
||||
protected final Map<String, Diff<T>> diffs;
|
||||
protected final Map<String, T> adds;
|
||||
|
||||
protected MapDiff() {
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
adds = new HashMap<>();
|
||||
protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
public ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
|
||||
KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
|
||||
for (IntCursor key : before.keys()) {
|
||||
if (!after.containsKey(key.value)) {
|
||||
deletes.add(key.value);
|
||||
}
|
||||
}
|
||||
|
||||
for (IntObjectCursor<T> partIter : after) {
|
||||
T beforePart = before.get(partIter.key);
|
||||
if (beforePart == null) {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
} else if (partIter.value.equals(beforePart) == false) {
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenIntMap<T> apply(ImmutableOpenIntMap<T> map) {
|
||||
ImmutableOpenIntMap.Builder<T> builder = ImmutableOpenIntMap.builder();
|
||||
builder.putAll(map);
|
||||
|
||||
for (Integer part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<Integer, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<Integer, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two maps of objects and is used as base class for different map implementations.
|
||||
*
|
||||
* Implements serialization. How differences are applied is left to subclasses.
|
||||
*
|
||||
* @param <K> the type of map keys
|
||||
* @param <T> the type of map values
|
||||
* @param <M> the map implementation type
|
||||
*/
|
||||
public static abstract class MapDiff<K, T, M> implements Diff<M> {
|
||||
|
||||
protected final List<K> deletes;
|
||||
protected final Map<K, Diff<T>> diffs; // incremental updates
|
||||
protected final Map<K, T> upserts; // additions or full updates
|
||||
protected final KeySerializer<K> keySerializer;
|
||||
protected final ValueSerializer<K, T> valueSerializer;
|
||||
|
||||
protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
adds = new HashMap<>();
|
||||
upserts = new HashMap<>();
|
||||
}
|
||||
|
||||
protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
upserts = new HashMap<>();
|
||||
int deletesCount = in.readVInt();
|
||||
for (int i = 0; i < deletesCount; i++) {
|
||||
deletes.add(in.readString());
|
||||
deletes.add(keySerializer.readKey(in));
|
||||
}
|
||||
|
||||
int diffsCount = in.readVInt();
|
||||
for (int i = 0; i < diffsCount; i++) {
|
||||
String key = in.readString();
|
||||
Diff<T> diff = reader.readDiffFrom(in, key);
|
||||
K key = keySerializer.readKey(in);
|
||||
Diff<T> diff = valueSerializer.readDiff(in, key);
|
||||
diffs.put(key, diff);
|
||||
}
|
||||
|
||||
int addsCount = in.readVInt();
|
||||
for (int i = 0; i < addsCount; i++) {
|
||||
String key = in.readString();
|
||||
T part = reader.readFrom(in, key);
|
||||
adds.put(key, part);
|
||||
int upsertsCount = in.readVInt();
|
||||
for (int i = 0; i < upsertsCount; i++) {
|
||||
K key = keySerializer.readKey(in);
|
||||
T newValue = valueSerializer.read(in, key);
|
||||
upserts.put(key, newValue);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The keys that, when this diff is applied to a map, should be removed from the map.
|
||||
*
|
||||
* @return the list of keys that are deleted
|
||||
*/
|
||||
public List<K> getDeletes() {
|
||||
return deletes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map entries that, when this diff is applied to a map, should be
|
||||
* incrementally updated. The incremental update is represented using
|
||||
* the {@link Diff} interface.
|
||||
*
|
||||
* @return the map entries that are incrementally updated
|
||||
*/
|
||||
public Map<K, Diff<T>> getDiffs() {
|
||||
return diffs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map entries that, when this diff is applied to a map, should be
|
||||
* added to the map or fully replace the previous value.
|
||||
*
|
||||
* @return the map entries that are additions or full updates
|
||||
*/
|
||||
public Map<K, T> getUpserts() {
|
||||
return upserts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(deletes.size());
|
||||
for (String delete : deletes) {
|
||||
out.writeString(delete);
|
||||
for (K delete : deletes) {
|
||||
keySerializer.writeKey(delete, out);
|
||||
}
|
||||
|
||||
out.writeVInt(diffs.size());
|
||||
for (Map.Entry<String, Diff<T>> entry : diffs.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
entry.getValue().writeTo(out);
|
||||
for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
|
||||
keySerializer.writeKey(entry.getKey(), out);
|
||||
valueSerializer.writeDiff(entry.getValue(), out);
|
||||
}
|
||||
|
||||
out.writeVInt(adds.size());
|
||||
for (Map.Entry<String, T> entry : adds.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
entry.getValue().writeTo(out);
|
||||
out.writeVInt(upserts.size());
|
||||
for (Map.Entry<K, T> entry : upserts.entrySet()) {
|
||||
keySerializer.writeKey(entry.getKey(), out);
|
||||
valueSerializer.write(entry.getValue(), out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides read and write operations to serialize keys of map
|
||||
* @param <K> type of key
|
||||
*/
|
||||
public interface KeySerializer<K> {
|
||||
void writeKey(K key, StreamOutput out) throws IOException;
|
||||
K readKey(StreamInput in) throws IOException;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes String keys of a map
|
||||
*/
|
||||
private static final class StringKeySerializer implements KeySerializer<String> {
|
||||
private static final StringKeySerializer INSTANCE = new StringKeySerializer();
|
||||
|
||||
@Override
|
||||
public void writeKey(String key, StreamOutput out) throws IOException {
|
||||
out.writeString(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String readKey(StreamInput in) throws IOException {
|
||||
return in.readString();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes Integer keys of a map as an Int
|
||||
*/
|
||||
private static final class IntKeySerializer implements KeySerializer<Integer> {
|
||||
public static final IntKeySerializer INSTANCE = new IntKeySerializer();
|
||||
|
||||
@Override
|
||||
public void writeKey(Integer key, StreamOutput out) throws IOException {
|
||||
out.writeInt(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer readKey(StreamInput in) throws IOException {
|
||||
return in.readInt();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes Integer keys of a map as a VInt. Requires keys to be non-negative.
|
||||
*/
|
||||
private static final class VIntKeySerializer implements KeySerializer<Integer> {
|
||||
public static final VIntKeySerializer INSTANCE = new VIntKeySerializer();
|
||||
|
||||
@Override
|
||||
public void writeKey(Integer key, StreamOutput out) throws IOException {
|
||||
if (key < 0) {
|
||||
throw new IllegalArgumentException("Map key [" + key + "] must be positive");
|
||||
}
|
||||
out.writeVInt(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer readKey(StreamInput in) throws IOException {
|
||||
return in.readVInt();
|
||||
}
|
||||
}
|
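The built-in key serializers above cover String and Integer keys. As an illustration only (not part of this change), a serializer for another key type follows the same two-method contract; the LongKeySerializer name below is hypothetical and assumes the writeLong/readLong stream methods.

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

/** Illustrative sketch: serializes Long map keys, mirroring the built-in serializers above. */
final class LongKeySerializer implements DiffableUtils.KeySerializer<Long> {

    @Override
    public void writeKey(Long key, StreamOutput out) throws IOException {
        out.writeLong(key);   // fixed-width encoding, analogous to IntKeySerializer
    }

    @Override
    public Long readKey(StreamInput in) throws IOException {
        return in.readLong();
    }
}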
||||
|
||||
/**
|
||||
* Provides read and write operations to serialize map values.
|
||||
* Reading of values can be made dependent on map key.
|
||||
*
|
||||
* Also provides operations to distinguish whether map values are diffable.
|
||||
*
|
||||
* Should not be implemented directly; instead, extend either
|
||||
* {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
|
||||
*
|
||||
* @param <K> key type of map
|
||||
* @param <V> value type of map
|
||||
*/
|
||||
public interface ValueSerializer<K, V> {
|
||||
|
||||
/**
|
||||
* Writes value to stream
|
||||
*/
|
||||
void write(V value, StreamOutput out) throws IOException;
|
||||
|
||||
/**
|
||||
* Reads value from stream. Reading operation can be made dependent on map key.
|
||||
*/
|
||||
V read(StreamInput in, K key) throws IOException;
|
||||
|
||||
/**
|
||||
* Whether this serializer supports diffable values
|
||||
*/
|
||||
boolean supportsDiffableValues();
|
||||
|
||||
/**
|
||||
* Computes diff if this serializer supports diffable values
|
||||
*/
|
||||
Diff<V> diff(V value, V beforePart);
|
||||
|
||||
/**
|
||||
* Writes value as diff to stream if this serializer supports diffable values
|
||||
*/
|
||||
void writeDiff(Diff<V> value, StreamOutput out) throws IOException;
|
||||
|
||||
/**
|
||||
* Reads value as diff from stream if this serializer supports diffable values.
|
||||
* Reading operation can be made dependent on map key.
|
||||
*/
|
||||
Diff<V> readDiff(StreamInput in, K key) throws IOException;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializer for Diffable map values. Needs to implement read and readDiff methods.
|
||||
*
|
||||
* @param <K> type of map keys
|
||||
* @param <V> type of map values
|
||||
*/
|
||||
public static abstract class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
|
||||
private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() {
|
||||
@Override
|
||||
public Object read(StreamInput in, Object key) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Object> readDiff(StreamInput in, Object key) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
};
|
||||
|
||||
private static <K, V extends Diffable<V>> DiffableValueSerializer<K, V> getWriteOnlyInstance() {
|
||||
return WRITE_ONLY_INSTANCE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supportsDiffableValues() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> diff(V value, V beforePart) {
|
||||
return value.diff(beforePart);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(V value, StreamOutput out) throws IOException {
|
||||
value.writeTo(out);
|
||||
}
|
||||
|
||||
public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
|
||||
value.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializer for non-diffable map values
|
||||
*
|
||||
* @param <K> type of map keys
|
||||
* @param <V> type of map values
|
||||
*/
|
||||
public static abstract class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
|
||||
@Override
|
||||
public boolean supportsDiffableValues() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> diff(V value, V beforePart) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
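As a hedged illustration of the non-diffable case (not part of this change), a serializer for plain String values only has to provide write and read; the diff-related methods are already stubbed out by the abstract class above.

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

/** Illustrative sketch: map values are plain strings, so no incremental diffs are possible. */
final class StringValueSerializer<K> extends DiffableUtils.NonDiffableValueSerializer<K, String> {

    @Override
    public void write(String value, StreamOutput out) throws IOException {
        out.writeString(value);   // the whole value is rewritten on every change (upsert)
    }

    @Override
    public String read(StreamInput in, K key) throws IOException {
        return in.readString();   // the key is available but not needed here
    }
}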
||||
|
||||
/**
|
||||
* Implementation of the ValueSerializer that uses a prototype object for reading operations
|
||||
*
|
||||
* Note: this implementation ignores the key.
|
||||
*/
|
||||
public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
|
||||
private final V proto;
|
||||
|
||||
public DiffablePrototypeValueReader(V proto) {
|
||||
this.proto = proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public V read(StreamInput in, K key) throws IOException {
|
||||
return proto.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
|
||||
return proto.readDiffFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation of ValueSerializer that serializes immutable sets of strings
|
||||
*
|
||||
* @param <K> type of map key
|
||||
*/
|
||||
public static class StringSetValueSerializer<K> extends NonDiffableValueSerializer<K, Set<String>> {
|
||||
private static final StringSetValueSerializer INSTANCE = new StringSetValueSerializer();
|
||||
|
||||
public static <K> StringSetValueSerializer<K> getInstance() {
|
||||
return INSTANCE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(Set<String> value, StreamOutput out) throws IOException {
|
||||
out.writeStringArray(value.toArray(new String[value.size()]));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> read(StreamInput in, K key) throws IOException {
|
||||
return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(in.readStringArray())));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
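Taken together, the pieces above are used as in the following sketch (illustrative, mirroring call sites later in this commit such as IndexMetaDataDiff and MetaDataDiff): compute a diff between two string-keyed maps of a Diffable type, then apply it to the old map to reconstruct the new one.

import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;

final class MapDiffExample {

    /** Reconstructs {@code after} from {@code before} plus the computed diff. */
    static ImmutableOpenMap<String, IndexMetaData> roundTrip(ImmutableOpenMap<String, IndexMetaData> before,
                                                             ImmutableOpenMap<String, IndexMetaData> after) {
        // deletes, diffs and upserts are computed per key, using the String key serializer
        Diff<ImmutableOpenMap<String, IndexMetaData>> diff =
                DiffableUtils.diff(before, after, DiffableUtils.getStringKeySerializer());
        // applying the diff to the old map yields a map equal to the new one
        return diff.apply(before);
    }
}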
@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
@ -57,7 +56,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
|
||||
final DiscoveryNodes nodes = state.nodes();
|
||||
if (nodes.masterNode() == null) {
|
||||
logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types()));
|
||||
logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
|
||||
return;
|
||||
}
|
||||
transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
|
||||
@ -67,7 +66,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
|
||||
@Override
|
||||
public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
|
||||
metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types());
|
||||
metaDataMappingService.refreshMapping(request.index(), request.indexUUID());
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
@ -76,16 +75,14 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
|
||||
private String index;
|
||||
private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
|
||||
private String[] types;
|
||||
private String nodeId;
|
||||
|
||||
public NodeMappingRefreshRequest() {
|
||||
}
|
||||
|
||||
public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) {
|
||||
public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) {
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
this.types = types;
|
||||
this.nodeId = nodeId;
|
||||
}
|
||||
|
||||
@ -107,11 +104,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
return indexUUID;
|
||||
}
|
||||
|
||||
|
||||
public String[] types() {
|
||||
return types;
|
||||
}
|
||||
|
||||
public String nodeId() {
|
||||
return nodeId;
|
||||
}
|
||||
@ -120,7 +112,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(index);
|
||||
out.writeStringArray(types);
|
||||
out.writeString(nodeId);
|
||||
out.writeString(indexUUID);
|
||||
}
|
||||
@ -129,7 +120,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
index = in.readString();
|
||||
types = in.readStringArray();
|
||||
nodeId = in.readString();
|
||||
indexUUID = in.readString();
|
||||
}
|
||||
|
@ -20,9 +20,7 @@
|
||||
package org.elasticsearch.cluster.action.shard;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
@ -38,15 +36,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
|
||||
|
||||
@ -63,9 +58,6 @@ public class ShardStateAction extends AbstractComponent {
|
||||
private final AllocationService allocationService;
|
||||
private final RoutingService routingService;
|
||||
|
||||
private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
|
||||
private final BlockingQueue<ShardRoutingEntry> failedShardQueue = ConcurrentCollections.newBlockingQueue();
|
||||
|
||||
@Inject
|
||||
public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
|
||||
AllocationService allocationService, RoutingService routingService) {
|
||||
@ -141,110 +133,94 @@ public class ShardStateAction extends AbstractComponent {
|
||||
});
|
||||
}
|
||||
|
||||
private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
|
||||
|
||||
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
failedShardQueue.add(shardRoutingEntry);
|
||||
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
new ClusterStateUpdateTask(Priority.HIGH) {
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
shardFailedClusterStateHandler,
|
||||
shardFailedClusterStateHandler);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
if (shardRoutingEntry.processed) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
|
||||
failedShardQueue.drainTo(shardRoutingEntries);
|
||||
|
||||
// nothing to process (a previous event has processed it already)
|
||||
if (shardRoutingEntries.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(shardRoutingEntries.size());
|
||||
|
||||
// mark all entries as processed
|
||||
for (ShardRoutingEntry entry : shardRoutingEntries) {
|
||||
entry.processed = true;
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(entry.shardRouting, entry.message, entry.failure));
|
||||
}
|
||||
|
||||
RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (!routingResult.changed()) {
|
||||
return currentState;
|
||||
}
|
||||
return ClusterState.builder(currentState).routingResult(routingResult).build();
|
||||
class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
batchResultBuilder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
batchResultBuilder.failures(tasks, t);
|
||||
}
|
||||
return batchResultBuilder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
|
||||
logger.trace("unassigned shards after shard failures. scheduling a reroute.");
|
||||
routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
}
|
||||
|
||||
private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
|
||||
new ShardStartedClusterStateHandler();
|
||||
|
||||
private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.debug("received shard started for {}", shardRoutingEntry);
|
||||
// buffer shard started requests, and the state update tasks will simply drain it
|
||||
// this is to optimize the number of "started" events we generate, and batch them
|
||||
// possibly, we can do time based batching as well, but usually, we would want to
|
||||
// process started events as fast as possible, to make shards available
|
||||
startedShardsQueue.add(shardRoutingEntry);
|
||||
|
||||
clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
new ClusterStateUpdateTask() {
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return Priority.URGENT;
|
||||
}
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.URGENT),
|
||||
shardStartedClusterStateHandler,
|
||||
shardStartedClusterStateHandler);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
|
||||
List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(task.shardRouting);
|
||||
}
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result =
|
||||
allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
builder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
builder.failures(tasks, t);
|
||||
}
|
||||
|
||||
if (shardRoutingEntry.processed) {
|
||||
return currentState;
|
||||
}
|
||||
return builder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
|
||||
startedShardsQueue.drainTo(shardRoutingEntries);
|
||||
|
||||
// nothing to process (a previous event has processed it already)
|
||||
if (shardRoutingEntries.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<ShardRouting> shardRoutingToBeApplied = new ArrayList<>(shardRoutingEntries.size());
|
||||
|
||||
// mark all entries as processed
|
||||
for (ShardRoutingEntry entry : shardRoutingEntries) {
|
||||
entry.processed = true;
|
||||
shardRoutingToBeApplied.add(entry.shardRouting);
|
||||
}
|
||||
|
||||
if (shardRoutingToBeApplied.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
RoutingAllocation.Result routingResult = allocationService.applyStartedShards(currentState, shardRoutingToBeApplied, true);
|
||||
if (!routingResult.changed()) {
|
||||
return currentState;
|
||||
}
|
||||
return ClusterState.builder(currentState).routingResult(routingResult).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
});
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
}
|
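Both handlers above follow the same batched pattern: one object acts as both ClusterStateTaskExecutor and ClusterStateTaskListener and is handed to the five-argument submitStateUpdateTask. A minimal sketch of that pattern, with a hypothetical task type and source string (the import locations are assumed from the wildcard imports used in this file), might look like:

import java.util.List;

import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.common.Priority;

/** Illustrative sketch of the batched cluster state task pattern used by the handlers above. */
class MyTaskHandler implements ClusterStateTaskExecutor<MyTaskHandler.MyTask>, ClusterStateTaskListener {

    static final class MyTask { /* per-request payload goes here */ }

    @Override
    public BatchResult<MyTask> execute(ClusterState currentState, List<MyTask> tasks) throws Exception {
        BatchResult.Builder<MyTask> builder = BatchResult.builder();
        // derive a single new cluster state from the whole batch, then mark every task as handled
        ClusterState newState = currentState; // real handlers compute routing changes here
        builder.successes(tasks);
        return builder.build(newState);
    }

    @Override
    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
        // optional follow-up work once the batched state has been published
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // called when the batch could not be processed
    }

    void submit(ClusterService clusterService, MyTask task) {
        // source string, task, config, executor and listener - the same five arguments as above
        clusterService.submitStateUpdateTask("my-task", task, ClusterStateTaskConfig.build(Priority.HIGH), this, this);
    }
}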
||||
|
||||
private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
@ -272,8 +248,6 @@ public class ShardStateAction extends AbstractComponent {
|
||||
String message;
|
||||
Throwable failure;
|
||||
|
||||
volatile boolean processed; // state field, no need to serialize
|
||||
|
||||
public ShardRoutingEntry() {
|
||||
}
|
||||
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.Version;
|
||||
@ -30,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
@ -46,10 +48,13 @@ import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.text.ParseException;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
|
||||
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
|
||||
@ -168,6 +173,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
|
||||
|
||||
private final int numberOfShards;
|
||||
private final int numberOfReplicas;
|
||||
|
||||
@ -184,6 +191,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
|
||||
private final ImmutableOpenMap<String, Custom> customs;
|
||||
|
||||
private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
|
||||
|
||||
private transient final int totalNumberOfShards;
|
||||
|
||||
private final DiscoveryNodeFilters requireFilters;
|
||||
@ -194,65 +203,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
private final Version indexUpgradedVersion;
|
||||
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
|
||||
private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
private IndexMetaData(String index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings,
|
||||
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
|
||||
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
|
||||
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
|
||||
Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion) {
|
||||
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
|
||||
}
|
||||
this.index = index;
|
||||
this.version = version;
|
||||
this.state = state;
|
||||
this.settings = settings;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.numberOfShards = numberOfShards;
|
||||
this.numberOfReplicas = numberOfReplicas;
|
||||
this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
|
||||
this.settings = settings;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.aliases = aliases;
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
if (requireMap.isEmpty()) {
|
||||
requireFilters = null;
|
||||
} else {
|
||||
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
|
||||
}
|
||||
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
|
||||
if (includeMap.isEmpty()) {
|
||||
includeFilters = null;
|
||||
} else {
|
||||
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
|
||||
}
|
||||
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
|
||||
if (excludeMap.isEmpty()) {
|
||||
excludeFilters = null;
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
indexCreatedVersion = Version.indexCreated(settings);
|
||||
indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
|
||||
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
|
||||
if (stringLuceneVersion != null) {
|
||||
try {
|
||||
this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
|
||||
} catch (ParseException ex) {
|
||||
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
|
||||
}
|
||||
} else {
|
||||
this.minimumCompatibleLuceneVersion = null;
|
||||
}
|
||||
this.activeAllocationIds = activeAllocationIds;
|
||||
this.requireFilters = requireFilters;
|
||||
this.includeFilters = includeFilters;
|
||||
this.excludeFilters = excludeFilters;
|
||||
this.indexCreatedVersion = indexCreatedVersion;
|
||||
this.indexUpgradedVersion = indexUpgradedVersion;
|
||||
this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
|
||||
}
|
||||
|
||||
public String getIndex() {
|
||||
@ -364,6 +337,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
return (T) customs.get(type);
|
||||
}
|
||||
|
||||
public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
|
||||
return activeAllocationIds;
|
||||
}
|
||||
|
||||
public Set<String> activeAllocationIds(int shardId) {
|
||||
assert shardId >= 0 && shardId < numberOfShards;
|
||||
return activeAllocationIds.get(shardId);
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public DiscoveryNodeFilters requireFilters() {
|
||||
return requireFilters;
|
||||
@ -408,6 +390,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
if (!customs.equals(that.customs)) {
|
||||
return false;
|
||||
}
|
||||
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -418,6 +403,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
result = 31 * result + aliases.hashCode();
|
||||
result = 31 * result + settings.hashCode();
|
||||
result = 31 * result + mappings.hashCode();
|
||||
result = 31 * result + activeAllocationIds.hashCode();
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -450,16 +436,19 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
private final Settings settings;
|
||||
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
|
||||
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
|
||||
private Diff<ImmutableOpenMap<String, Custom>> customs;
|
||||
private final Diff<ImmutableOpenMap<String, Custom>> customs;
|
||||
private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
|
||||
|
||||
public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
|
||||
index = after.index;
|
||||
version = after.version;
|
||||
state = after.state;
|
||||
settings = after.settings;
|
||||
mappings = DiffableUtils.diff(before.mappings, after.mappings);
|
||||
aliases = DiffableUtils.diff(before.aliases, after.aliases);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
|
||||
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
|
||||
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
|
||||
}
|
||||
|
||||
public IndexMetaDataDiff(StreamInput in) throws IOException {
|
||||
@ -467,19 +456,22 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
version = in.readLong();
|
||||
state = State.fromId(in.readByte());
|
||||
settings = Settings.readSettingsFromStream(in);
|
||||
mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO);
|
||||
aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader<Custom>() {
|
||||
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
|
||||
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
|
||||
DiffableUtils.StringSetValueSerializer.getInstance());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -491,6 +483,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
mappings.writeTo(out);
|
||||
aliases.writeTo(out);
|
||||
customs.writeTo(out);
|
||||
activeAllocationIds.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -502,6 +495,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
builder.mappings.putAll(mappings.apply(part.mappings));
|
||||
builder.aliases.putAll(aliases.apply(part.aliases));
|
||||
builder.customs.putAll(customs.apply(part.customs));
|
||||
builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
@ -528,6 +522,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
|
||||
builder.putCustom(type, customIndexMetaData);
|
||||
}
|
||||
int activeAllocationIdsSize = in.readVInt();
|
||||
for (int i = 0; i < activeAllocationIdsSize; i++) {
|
||||
int key = in.readVInt();
|
||||
Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
|
||||
builder.putActiveAllocationIds(key, allocationIds);
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@ -550,6 +550,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
out.writeString(cursor.key);
|
||||
cursor.value.writeTo(out);
|
||||
}
|
||||
out.writeVInt(activeAllocationIds.size());
|
||||
for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
|
||||
out.writeVInt(cursor.key);
|
||||
DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
|
||||
}
|
||||
}
|
||||
|
||||
public static Builder builder(String index) {
|
||||
@ -569,12 +574,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
|
||||
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
|
||||
private final ImmutableOpenMap.Builder<String, Custom> customs;
|
||||
private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
|
||||
|
||||
public Builder(String index) {
|
||||
this.index = index;
|
||||
this.mappings = ImmutableOpenMap.builder();
|
||||
this.aliases = ImmutableOpenMap.builder();
|
||||
this.customs = ImmutableOpenMap.builder();
|
||||
this.activeAllocationIds = ImmutableOpenIntMap.builder();
|
||||
}
|
||||
|
||||
public Builder(IndexMetaData indexMetaData) {
|
||||
@ -585,6 +592,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
|
||||
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
|
||||
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
|
||||
this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
|
||||
}
|
||||
|
||||
public String index() {
|
||||
@ -693,6 +701,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
return this.customs.get(type);
|
||||
}
|
||||
|
||||
public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
|
||||
activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
|
||||
return this;
|
||||
}
|
||||
|
||||
public Set<String> getActiveAllocationIds(int shardId) {
|
||||
return activeAllocationIds.get(shardId);
|
||||
}
|
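For illustration only (not part of this change), the new builder methods are used as sketched below; the index name is hypothetical and the settings argument is assumed to already carry the mandatory shard and replica counts, since build() validates them.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;

final class ActiveAllocationIdsExample {

    /** Records which allocation ids were active for shard 0 of a (hypothetical) index. */
    static IndexMetaData withActiveAllocations(Settings indexSettings) {
        Set<String> shard0Ids = new HashSet<>(Collections.singletonList("alloc-id-1"));
        return IndexMetaData.builder("my-index")       // index name is illustrative
                .settings(indexSettings)               // must define number_of_shards / number_of_replicas
                .putActiveAllocationIds(0, shard0Ids)  // the builder keeps its own copy of the set
                .build();                              // missing shard entries are filled with empty sets
    }
}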
||||
|
||||
public long version() {
|
||||
return this.version;
|
||||
}
|
||||
@ -714,7 +731,72 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
}
|
||||
}
|
||||
|
||||
return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
// fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
|
||||
ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
|
||||
for (int i = 0; i < numberOfShards; i++) {
|
||||
if (activeAllocationIds.containsKey(i)) {
|
||||
filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
|
||||
} else {
|
||||
filledActiveAllocationIds.put(i, Collections.emptySet());
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
final DiscoveryNodeFilters requireFilters;
|
||||
if (requireMap.isEmpty()) {
|
||||
requireFilters = null;
|
||||
} else {
|
||||
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
|
||||
}
|
||||
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
|
||||
final DiscoveryNodeFilters includeFilters;
|
||||
if (includeMap.isEmpty()) {
|
||||
includeFilters = null;
|
||||
} else {
|
||||
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
|
||||
}
|
||||
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
|
||||
final DiscoveryNodeFilters excludeFilters;
|
||||
if (excludeMap.isEmpty()) {
|
||||
excludeFilters = null;
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
Version indexCreatedVersion = Version.indexCreated(settings);
|
||||
Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
|
||||
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
|
||||
final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
if (stringLuceneVersion != null) {
|
||||
try {
|
||||
minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
|
||||
} catch (ParseException ex) {
|
||||
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
|
||||
}
|
||||
} else {
|
||||
minimumCompatibleLuceneVersion = null;
|
||||
}
|
||||
|
||||
return new IndexMetaData(index, version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
|
||||
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
|
||||
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
|
||||
}
|
||||
|
||||
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
@ -757,6 +839,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.startObject(KEY_ACTIVE_ALLOCATIONS);
|
||||
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
|
||||
builder.startArray(String.valueOf(cursor.key));
|
||||
for (String allocationId : cursor.value) {
|
||||
builder.value(allocationId);
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.endObject();
|
||||
}
|
||||
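To make the new block above concrete: for a two-shard index whose shards have the (hypothetical) allocation ids a1/a2 and b1, the generated fragment would look roughly like

"active_allocations" : { "0" : [ "a1", "a2" ], "1" : [ "b1" ] }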
@ -792,6 +883,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
|
||||
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
|
||||
}
|
||||
} else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
String shardId = currentFieldName;
|
||||
Set<String> allocationIds = new HashSet<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
allocationIds.add(parser.text());
|
||||
}
|
||||
}
|
||||
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// check if its a custom index metadata
|
||||
Custom proto = lookupPrototype(currentFieldName);
|
||||
|
@ -27,7 +27,6 @@ import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.Diffable;
|
||||
import org.elasticsearch.cluster.DiffableUtils;
|
||||
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
|
||||
import org.elasticsearch.cluster.InternalClusterInfoService;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
@ -41,6 +40,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.loader.SettingsLoader;
|
||||
@ -54,7 +54,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
import org.elasticsearch.indices.ttl.IndicesTTLService;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
@ -640,9 +639,9 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
version = after.version;
|
||||
transientSettings = after.transientSettings;
|
||||
persistentSettings = after.persistentSettings;
|
||||
indices = DiffableUtils.diff(before.indices, after.indices);
|
||||
templates = DiffableUtils.diff(before.templates, after.templates);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
|
||||
templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer());
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
}
|
||||
|
||||
public MetaDataDiff(StreamInput in) throws IOException {
|
||||
@ -650,16 +649,17 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
version = in.readLong();
|
||||
transientSettings = Settings.readSettingsFromStream(in);
|
||||
persistentSettings = Settings.readSettingsFromStream(in);
|
||||
indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO);
|
||||
templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
|
||||
indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
|
||||
templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
@ -1029,12 +1029,18 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
|
||||
for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
|
||||
AliasMetaData aliasMetaData = aliasCursor.value;
|
||||
AliasOrIndex.Alias aliasOrIndex = (AliasOrIndex.Alias) aliasAndIndexLookup.get(aliasMetaData.getAlias());
|
||||
AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
|
||||
if (aliasOrIndex == null) {
|
||||
aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
|
||||
aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
|
||||
} else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
|
||||
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
|
||||
alias.addIndex(indexMetaData);
|
||||
} else if (aliasOrIndex instanceof AliasOrIndex.Index) {
|
||||
AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
|
||||
throw new IllegalStateException("index and alias names need to be unique, but alias [" + aliasMetaData.getAlias() + "] and index [" + index.getIndex().getIndex() + "] have the same name");
|
||||
} else {
|
||||
aliasOrIndex.addIndex(indexMetaData);
|
||||
throw new IllegalStateException("unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -299,7 +299,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
||||
// Set up everything, now locally create the index to see that things are ok, and apply
|
||||
final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
|
||||
// create the index here (on the master) to validate it can be created, as well as adding the mapping
|
||||
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.EMPTY_LIST);
|
||||
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
|
||||
indexCreated = true;
|
||||
// now add the mappings
|
||||
IndexService indexService = indicesService.indexServiceSafe(request.index());
|
||||
|
@ -99,7 +99,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
||||
if (indexService == null) {
|
||||
// temporarily create the index and add mappings so we can parse the filter
|
||||
try {
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
|
||||
}
|
||||
|
@ -217,6 +217,9 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
||||
for (Alias alias : request.aliases) {
|
||||
//we validate the alias only partially, as we don't know yet to which index it'll get applied to
|
||||
aliasValidator.validateAliasStandalone(alias);
|
||||
if (request.template.equals(alias.name())) {
|
||||
throw new IllegalArgumentException("Alias [" + alias.name() + "] cannot be the same as the template pattern [" + request.template + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -218,8 +218,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||
try {
|
||||
// We cannot instantiate real analysis server at this point because the node might not have
|
||||
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST);
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP);
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
|
||||
|
||||
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
|
||||
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
|
||||
@ -256,7 +256,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||
};
|
||||
|
||||
public FakeAnalysisService(IndexSettings indexSettings) {
|
||||
super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
|
||||
super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.InvalidTypeNameException;
|
||||
@ -69,12 +68,10 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
static class RefreshTask {
|
||||
final String index;
|
||||
final String indexUUID;
|
||||
final String[] types;
|
||||
|
||||
RefreshTask(String index, final String indexUUID, String[] types) {
|
||||
RefreshTask(String index, final String indexUUID) {
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
this.types = types;
|
||||
}
|
||||
}
|
||||
|
||||
@ -87,15 +84,11 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much
|
||||
* Batch method to apply all the queued refresh operations. The idea is to try and batch as much
|
||||
* as possible so we won't create the same index all the time for example for the updates on the same mapping
|
||||
* and generate a single cluster change event out of all of those.
|
||||
*/
|
||||
ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
|
||||
if (allTasks.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
// break down to tasks per index, so we can optimize the on demand index service creation
|
||||
// to only happen for the duration of a single index processing of its respective events
|
||||
Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
|
||||
@ -120,13 +113,16 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
|
||||
// the latest (based on order) update mapping one per node
|
||||
List<RefreshTask> allIndexTasks = entry.getValue();
|
||||
List<RefreshTask> tasks = new ArrayList<>();
|
||||
boolean hasTaskWithRightUUID = false;
|
||||
for (RefreshTask task : allIndexTasks) {
|
||||
if (!indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
if (indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
hasTaskWithRightUUID = true;
|
||||
} else {
|
||||
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
|
||||
continue;
|
||||
}
|
||||
tasks.add(task);
|
||||
}
|
||||
if (hasTaskWithRightUUID == false) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// construct the actual index if needed, and make sure the relevant mappings are there
|
||||
@ -134,24 +130,17 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
// we need to create the index here, and add the current mapping to it, so we can merge
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
removeIndex = true;
|
||||
Set<String> typesToIntroduce = new HashSet<>();
for (RefreshTask task : tasks) {
Collections.addAll(typesToIntroduce, task.types);
}
for (String type : typesToIntroduce) {
// only add the current relevant mapping (if exists)
if (indexMetaData.getMappings().containsKey(type)) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true);
}
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
}
}

IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
try {
boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder);
boolean indexDirty = refreshIndexMapping(indexService, builder);
if (indexDirty) {
mdBuilder.put(builder);
dirty = true;
@ -169,38 +158,28 @@ public class MetaDataMappingService extends AbstractComponent {
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}

private boolean processIndexMappingTasks(List<RefreshTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Builder builder) {
boolean dirty = false;
String index = indexService.index().name();
// keep track of what we already refreshed, no need to refresh it again...
Set<String> processedRefreshes = new HashSet<>();
for (RefreshTask refreshTask : tasks) {
try {
List<String> updatedTypes = new ArrayList<>();
for (String type : refreshTask.types) {
if (processedRefreshes.contains(type)) {
continue;
}
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
if (mapper == null) {
continue;
}
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
updatedTypes.add(type);
builder.putMapping(new MappingMetaData(mapper));
}
processedRefreshes.add(type);
try {
List<String> updatedTypes = new ArrayList<>();
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
final String type = mapper.type();
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
updatedTypes.add(type);
}

if (updatedTypes.isEmpty()) {
continue;
}

logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
dirty = true;
} catch (Throwable t) {
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
}

// if a single type is not up-to-date, re-send everything
if (updatedTypes.isEmpty() == false) {
logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
dirty = true;
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
builder.putMapping(new MappingMetaData(mapper));
}
}
} catch (Throwable t) {
logger.warn("[{}] failed to refresh-mapping in cluster state", t, index);
}
return dirty;
}
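
For reference, a minimal self-contained sketch of the resync rule implemented by refreshIndexMapping above; this is plain Java and not part of the change, with the hypothetical MappingResyncSketch class and String mapping sources standing in for the real DocumentMapper and CompressedXContent objects: if any type's live mapping differs from what the cluster state stores, every mapping for the index is written back, not just the stale one.

import java.util.HashMap;
import java.util.Map;

public class MappingResyncSketch {
    // returns true ("dirty") when any live mapping differs from the stored one;
    // in that case all mappings are re-sent, mirroring builder.putMapping(...) for every type
    static boolean resync(Map<String, String> liveMappings, Map<String, String> storedMappings) {
        boolean outOfSync = liveMappings.entrySet().stream()
                .anyMatch(e -> !e.getValue().equals(storedMappings.get(e.getKey())));
        if (outOfSync) {
            storedMappings.putAll(liveMappings);
        }
        return outOfSync;
    }

    public static void main(String[] args) {
        Map<String, String> live = new HashMap<>();
        live.put("doc", "{\"properties\":{\"field\":{\"type\":\"string\"}}}");
        Map<String, String> stored = new HashMap<>();
        stored.put("doc", "{}");
        System.out.println(resync(live, stored)); // true -> the cluster state must be republished
    }
}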
|
||||
@ -208,9 +187,9 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
/**
|
||||
* Refreshes mappings if they are not the same between original and parsed version
|
||||
*/
|
||||
public void refreshMapping(final String index, final String indexUUID, final String... types) {
|
||||
final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types);
|
||||
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]",
|
||||
public void refreshMapping(final String index, final String indexUUID) {
|
||||
final RefreshTask refreshTask = new RefreshTask(index, indexUUID);
|
||||
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "]",
|
||||
refreshTask,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
refreshExecutor,
|
||||
@ -228,26 +207,14 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
// failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
|
||||
for (String index : request.indices()) {
|
||||
if (currentState.metaData().hasIndex(index)) {
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
if (indexMetaData != null && indicesService.hasIndex(index) == false) {
|
||||
// if we don't have the index, we will throw exceptions later;
|
||||
if (indicesService.hasIndex(index) == false || indicesToClose.contains(index)) {
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
IndexService indexService;
|
||||
if (indicesService.hasIndex(index) == false) {
|
||||
indicesToClose.add(index);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
// make sure to add custom default mapping if exists
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
}
|
||||
} else {
|
||||
indexService = indicesService.indexService(index);
|
||||
}
|
||||
// only add the current relevant mapping (if exists and not yet added)
|
||||
if (indexMetaData.getMappings().containsKey(request.type()) &&
|
||||
!indexService.mapperService().hasMapping(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
}
|
||||
indicesToClose.add(index);
|
||||
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
// add mappings for all types, we need them for cross-type validation
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -287,7 +254,7 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new MergeMappingException(mergeResult.buildConflicts());
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
|
@ -314,12 +314,12 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
||||
|
||||
public RoutingTableDiff(RoutingTable before, RoutingTable after) {
|
||||
version = after.version;
|
||||
indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting);
|
||||
indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
|
||||
}
|
||||
|
||||
public RoutingTableDiff(StreamInput in) throws IOException {
|
||||
version = in.readLong();
|
||||
indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, IndexRoutingTable.PROTO);
|
||||
indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -27,7 +27,14 @@ import org.elasticsearch.cluster.health.ClusterStateHealth;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.*;
|
||||
import org.elasticsearch.cluster.routing.AllocationId;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
|
||||
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
@ -39,6 +46,8 @@ import org.elasticsearch.common.settings.Settings;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@ -79,24 +88,83 @@ public class AllocationService extends AbstractComponent {
|
||||
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
|
||||
boolean changed = applyStartedShards(routingNodes, startedShards);
|
||||
if (!changed) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable());
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
shardsAllocators.applyStartedShards(allocation);
|
||||
if (withReroute) {
|
||||
reroute(allocation);
|
||||
}
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
|
||||
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
|
||||
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.metaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
|
||||
"shards started [" + startedShardsAsString + "] ..."
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
return buildChangedResult(metaData, routingNodes, new RoutingExplanations());

}
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) {
final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build();
MetaData newMetaData = updateMetaDataWithRoutingTable(metaData, routingTable);
return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations);
}

/**
* Updates the current {@link MetaData} based on the newly created {@link RoutingTable}.
*
* @param currentMetaData {@link MetaData} object from before the routing table was changed.
* @param newRoutingTable new {@link RoutingTable} created by the allocation change
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
*/
static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
// make sure index meta data and routing tables are in sync w.r.t active allocation ids
MetaData.Builder metaDataBuilder = null;
for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
if (indexMetaData == null) {
throw new IllegalStateException("no metadata found for index [" + indexRoutingTable.index() + "]");
}
IndexMetaData.Builder indexMetaDataBuilder = null;
for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
.map(ShardRouting::allocationId)
.filter(Objects::nonNull)
.map(AllocationId::getId)
.collect(Collectors.toSet());
// only update active allocation ids if there is an active shard
if (activeAllocationIds.isEmpty() == false) {
// get currently stored allocation ids
Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
if (activeAllocationIds.equals(storedAllocationIds) == false) {
if (indexMetaDataBuilder == null) {
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
}

indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
}
}
}
if (indexMetaDataBuilder != null) {
if (metaDataBuilder == null) {
metaDataBuilder = MetaData.builder(currentMetaData);
}
metaDataBuilder.put(indexMetaDataBuilder);
}
}
if (metaDataBuilder != null) {
return metaDataBuilder.build();
} else {
return currentMetaData;
}
}
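
The stream pipeline above boils down to a per-shard set comparison. A stand-alone sketch of that rule, using plain Java sets and a hypothetical ActiveIdSyncSketch class rather than the real routing classes; the id values are made up for illustration:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ActiveIdSyncSketch {
    public static void main(String[] args) {
        Set<String> storedIds = new HashSet<>(Arrays.asList("a1", "a2")); // what IndexMetaData currently holds
        Set<String> activeIds = new HashSet<>(Arrays.asList("a1", "a3")); // derived from the new routing table
        if (!activeIds.isEmpty() && !activeIds.equals(storedIds)) {
            // corresponds to indexMetaDataBuilder.putActiveAllocationIds(shardId, activeIds)
            System.out.println("shard 0: update active allocation ids to " + activeIds);
        } else {
            System.out.println("shard 0: metadata left untouched");
        }
    }
}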
|
||||
|
||||
public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
|
||||
return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
|
||||
}
|
||||
@ -117,16 +185,15 @@ public class AllocationService extends AbstractComponent {
|
||||
System.nanoTime(), System.currentTimeMillis()));
|
||||
}
|
||||
if (!changed) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable());
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
shardsAllocators.applyFailedShards(allocation);
|
||||
reroute(allocation);
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
|
||||
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"shards failed [" + failedShardsAsString + "] ..."
|
||||
);
|
||||
return result;
|
||||
@ -169,11 +236,10 @@ public class AllocationService extends AbstractComponent {
|
||||
// the assumption is that commands will move / act on shards (or fail through exceptions)
|
||||
// so, there will always be shard "movements", so no need to check on reroute
|
||||
reroute(allocation);
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable, explanations);
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"reroute commands"
|
||||
);
|
||||
return result;
|
||||
@ -200,13 +266,12 @@ public class AllocationService extends AbstractComponent {
|
||||
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
|
||||
allocation.debugDecision(debug);
|
||||
if (!reroute(allocation)) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable());
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
reason
|
||||
);
|
||||
return result;
|
||||
|
@ -52,29 +52,33 @@ public class RoutingAllocation {

private final RoutingTable routingTable;

private final MetaData metaData;

private RoutingExplanations explanations = new RoutingExplanations();

/**
* Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
*/
public Result(boolean changed, RoutingTable routingTable) {
public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
this.changed = changed;
this.routingTable = routingTable;
this.metaData = metaData;
}

/**
* Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
* @param explanations Explanation for the reroute actions
*/
public Result(boolean changed, RoutingTable routingTable, RoutingExplanations explanations) {
public Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
this.changed = changed;
this.routingTable = routingTable;
this.metaData = metaData;
this.explanations = explanations;
}
|
||||
|
||||
@ -85,6 +89,14 @@ public class RoutingAllocation {
|
||||
return this.changed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the {@link MetaData} referenced by this result
|
||||
* @return referenced {@link MetaData}
|
||||
*/
|
||||
public MetaData metaData() {
|
||||
return metaData;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the {@link RoutingTable} referenced by this result
|
||||
* @return referenced {@link RoutingTable}
|
||||
|
@ -116,7 +116,7 @@ public abstract class Terminal {
}

public void printError(Throwable t) {
printError("%s", t.getMessage());
printError("%s", t.toString());
if (isDebugEnabled) {
printStackTrace(t);
}
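
A stand-alone illustration, not Elasticsearch code, of why printError now formats t.toString() instead of t.getMessage(): an exception created without a message would otherwise print the literal string "null" and hide the exception type.

public class PrintErrorSketch {
    public static void main(String[] args) {
        Throwable t = new NullPointerException();            // created without a message
        System.out.println(String.valueOf(t.getMessage()));  // "null" -- the old, unhelpful output
        System.out.println(t.toString());                    // "java.lang.NullPointerException"
    }
}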
|
||||
|
@ -23,7 +23,6 @@ import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
|
||||
import org.elasticsearch.common.compress.lzf.LZFCompressor;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
@ -42,7 +41,6 @@ public class CompressorFactory {
|
||||
|
||||
static {
|
||||
compressors = new Compressor[] {
|
||||
new LZFCompressor(),
|
||||
new DeflateCompressor()
|
||||
};
|
||||
defaultCompressor = new DeflateCompressor();
|
||||
@ -82,12 +80,23 @@ public class CompressorFactory {

XContentType contentType = XContentFactory.xContentType(bytes);
if (contentType == null) {
if (isAncient(bytes)) {
throw new IllegalStateException("unsupported compression: index was created before v2.0.0.beta1 and wasn't upgraded?");
}
throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes");
}

return null;
}

/** true if the bytes were compressed with LZF: only used before elasticsearch 2.0 */
private static boolean isAncient(BytesReference bytes) {
return bytes.length() >= 3 &&
bytes.get(0) == 'Z' &&
bytes.get(1) == 'V' &&
(bytes.get(2) == 0 || bytes.get(2) == 1);
}
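
The same magic-byte test in isolation, as a runnable plain-Java sketch with a hypothetical LzfHeaderSketch class operating on a raw byte[] instead of BytesReference: LZF-framed data starts with the bytes 'Z' 'V' followed by a block type of 0 or 1.

public class LzfHeaderSketch {
    static boolean looksLikeLzf(byte[] bytes) {
        return bytes.length >= 3
                && bytes[0] == 'Z'
                && bytes[1] == 'V'
                && (bytes[2] == 0 || bytes[2] == 1);
    }

    public static void main(String[] args) {
        System.out.println(looksLikeLzf(new byte[]{'Z', 'V', 1, 42}));  // true  -> pre-2.0 compressed source
        System.out.println(looksLikeLzf("{\"ok\":true}".getBytes()));   // false -> plain JSON
    }
}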
|
||||
|
||||
public static Compressor compressor(ChannelBuffer buffer) {
|
||||
for (Compressor compressor : compressors) {
|
||||
if (compressor.isCompressed(buffer)) {
|
||||
|
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import org.apache.lucene.store.BufferedIndexInput;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.compress.CompressedIndexInput;
|
||||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
*/
|
||||
@Deprecated
|
||||
public class LZFCompressedIndexInput extends CompressedIndexInput {
|
||||
|
||||
private final ChunkDecoder decoder;
|
||||
// scratch area buffer
|
||||
private byte[] inputBuffer;
|
||||
|
||||
public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
|
||||
super(in);
|
||||
|
||||
this.decoder = decoder;
|
||||
this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
|
||||
this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readHeader(IndexInput in) throws IOException {
|
||||
byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
|
||||
in.readBytes(header, 0, header.length, false);
|
||||
if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
|
||||
throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int uncompress(IndexInput in, byte[] out) throws IOException {
|
||||
return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
// nothing to do here...
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexInput clone() {
|
||||
LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
|
||||
cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
return cloned;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexInput slice(String description, long offset, long length) throws IOException {
|
||||
return BufferedIndexInput.wrap(description, this, offset, length);
|
||||
}
|
||||
}
|
@ -1,73 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.BufferRecycler;
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import org.elasticsearch.common.compress.CompressedStreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class LZFCompressedStreamInput extends CompressedStreamInput {
|
||||
|
||||
private final BufferRecycler recycler;
|
||||
|
||||
private final ChunkDecoder decoder;
|
||||
|
||||
// scratch area buffer
|
||||
private byte[] inputBuffer;
|
||||
|
||||
public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
|
||||
super(in);
|
||||
this.recycler = BufferRecycler.instance();
|
||||
this.decoder = decoder;
|
||||
|
||||
this.uncompressed = recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
|
||||
this.inputBuffer = recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readHeader(StreamInput in) throws IOException {
|
||||
// nothing to do here, each chunk has a header
|
||||
}
|
||||
|
||||
@Override
|
||||
public int uncompress(StreamInput in, byte[] out) throws IOException {
|
||||
return decoder.decodeChunk(in, inputBuffer, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
byte[] buf = inputBuffer;
|
||||
if (buf != null) {
|
||||
inputBuffer = null;
|
||||
recycler.releaseInputBuffer(buf);
|
||||
}
|
||||
buf = uncompressed;
|
||||
if (buf != null) {
|
||||
uncompressed = null;
|
||||
recycler.releaseDecodeBuffer(uncompressed);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,100 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import com.ning.compress.lzf.util.ChunkDecoderFactory;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.CompressedIndexInput;
|
||||
import org.elasticsearch.common.compress.Compressor;
|
||||
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link DeflateCompressor} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public class LZFCompressor implements Compressor {
|
||||
|
||||
static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};
|
||||
|
||||
private ChunkDecoder decoder;
|
||||
|
||||
public LZFCompressor() {
|
||||
this.decoder = ChunkDecoderFactory.safeInstance();
|
||||
Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(BytesReference bytes) {
|
||||
return bytes.length() >= 3 &&
|
||||
bytes.get(0) == LZFChunk.BYTE_Z &&
|
||||
bytes.get(1) == LZFChunk.BYTE_V &&
|
||||
(bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(ChannelBuffer buffer) {
|
||||
int offset = buffer.readerIndex();
|
||||
return buffer.readableBytes() >= 3 &&
|
||||
buffer.getByte(offset) == LZFChunk.BYTE_Z &&
|
||||
buffer.getByte(offset + 1) == LZFChunk.BYTE_V &&
|
||||
(buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_COMPRESSED || buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(IndexInput in) throws IOException {
|
||||
long currentPointer = in.getFilePointer();
|
||||
// since we have some metadata before the first compressed header, we check on our specific header
|
||||
if (in.length() - currentPointer < (LUCENE_HEADER.length)) {
|
||||
return false;
|
||||
}
|
||||
for (int i = 0; i < LUCENE_HEADER.length; i++) {
|
||||
if (in.readByte() != LUCENE_HEADER[i]) {
|
||||
in.seek(currentPointer);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
in.seek(currentPointer);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamInput streamInput(StreamInput in) throws IOException {
|
||||
return new LZFCompressedStreamInput(in, decoder);
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamOutput streamOutput(StreamOutput out) throws IOException {
|
||||
throw new UnsupportedOperationException("LZF is only here for back compat, no write support");
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompressedIndexInput indexInput(IndexInput in) throws IOException {
|
||||
return new LZFCompressedIndexInput(in, decoder);
|
||||
}
|
||||
}
|
@ -34,8 +34,6 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
|
||||
|
||||
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
|
||||
|
||||
protected boolean translated = false;
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
|
@ -89,6 +89,4 @@ public class MultiPolygonBuilder extends ShapeBuilder {
|
||||
return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
|
||||
//note: ShapeCollection is probably faster than a Multi* geom.
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -38,6 +38,7 @@ import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
/**
|
||||
* The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
|
||||
@ -141,9 +142,10 @@ public class PolygonBuilder extends ShapeBuilder {
|
||||
|
||||
Edge[] edges = new Edge[numEdges];
|
||||
Edge[] holeComponents = new Edge[holes.size()];
|
||||
int offset = createEdges(0, orientation, shell, null, edges, 0);
|
||||
final AtomicBoolean translated = new AtomicBoolean(false);
|
||||
int offset = createEdges(0, orientation, shell, null, edges, 0, translated);
|
||||
for (int i = 0; i < holes.size(); i++) {
|
||||
int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset);
|
||||
int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset, translated);
|
||||
holeComponents[i] = edges[offset];
|
||||
offset += length;
|
||||
}
|
||||
@ -508,14 +510,157 @@ public class PolygonBuilder extends ShapeBuilder {
}

private static int createEdges(int component, Orientation orientation, LineStringBuilder shell,
LineStringBuilder hole,
Edge[] edges, int offset) {
LineStringBuilder hole, Edge[] edges, int offset, final AtomicBoolean translated) {
// inner rings (holes) have an opposite direction than the outer rings
// XOR will invert the orientation for outer ring cases (Truth Table: T/T = F, T/F = T, F/T = T, F/F = F)
boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
// set the points array accordingly (shell or hole)
Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
Edge.ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1);
ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
return points.length-1;
}

/**
* Create a connected list of edges from a list of coordinates
*
* @param points
* array of points
* @param offset
* index of the first point
* @param length
* number of points
* @return Array of edges
*/
private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
// calculate the direction of the points:
// find the point at the top of the set and check its
// neighbors' orientation. So direction is equivalent
// to clockwise/counterclockwise
final int top = top(points, offset, length);
final int prev = (offset + ((top + length - 1) % length));
final int next = (offset + ((top + 1) % length));
boolean orientation = points[offset + prev].x > points[offset + next].x;

// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
// thus if orientation is computed as cw, the logic will translate points across dateline
// and convert to a right handed system

// compute the bounding box and calculate range
double[] range = range(points, offset, length);
final double rng = range[1] - range[0];
// translate the points if the following is true
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
// (translation would result in a collapsed poly)
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
boolean incorrectOrientation = component == 0 && handedness != orientation;
if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) {
translate(points);
// flip the translation bit if the shell is being translated
if (component == 0) {
translated.set(true);
}
// correct the orientation post translation (ccw for shell, cw for holes)
if (component == 0 || (component != 0 && handedness == orientation)) {
orientation = !orientation;
}
}
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
}
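
A minimal stand-alone sketch of the dateline decision made in ring() above, assuming a hypothetical DatelineTranslateSketch class and a plain longitude array instead of JTS Coordinates: when a shell with the wrong handedness spans more than a hemisphere but not the full globe, its negative longitudes are shifted by 360 so the ring stays contiguous across the dateline.

public class DatelineTranslateSketch {
    static final double DATELINE = 180.0;

    static void translateIfNeeded(double[] longitudes, boolean wrongHandedness) {
        double min = Double.POSITIVE_INFINITY, max = Double.NEGATIVE_INFINITY;
        for (double lon : longitudes) {
            min = Math.min(min, lon);
            max = Math.max(max, lon);
        }
        double range = max - min;
        if (wrongHandedness && range > DATELINE && range != 2 * DATELINE) {
            for (int i = 0; i < longitudes.length; i++) {
                if (longitudes[i] < 0) {
                    longitudes[i] += 2 * DATELINE; // e.g. -170 becomes 190
                }
            }
        }
    }

    public static void main(String[] args) {
        double[] shell = {170, -170, -170, 170}; // crosses the dateline
        translateIfNeeded(shell, true);
        System.out.println(java.util.Arrays.toString(shell)); // [170.0, 190.0, 190.0, 170.0]
    }
}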
|
||||
|
||||
private static final int top(Coordinate[] points, int offset, int length) {
|
||||
int top = 0; // we start at 1 here since top points to 0
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (points[offset + i].y < points[offset + top].y) {
|
||||
top = i;
|
||||
} else if (points[offset + i].y == points[offset + top].y) {
|
||||
if (points[offset + i].x < points[offset + top].x) {
|
||||
top = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
return top;
|
||||
}
|
||||
|
||||
private static final double[] range(Coordinate[] points, int offset, int length) {
|
||||
double minX = points[0].x;
|
||||
double maxX = points[0].x;
|
||||
double minY = points[0].y;
|
||||
double maxY = points[0].y;
|
||||
// compute the bounding coordinates (@todo: cleanup brute force)
|
||||
for (int i = 1; i < length; ++i) {
|
||||
if (points[offset + i].x < minX) {
|
||||
minX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].x > maxX) {
|
||||
maxX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].y < minY) {
|
||||
minY = points[offset + i].y;
|
||||
}
|
||||
if (points[offset + i].y > maxY) {
|
||||
maxY = points[offset + i].y;
|
||||
}
|
||||
}
|
||||
return new double[] {minX, maxX, minY, maxY};
|
||||
}
|
||||
|
||||
/**
|
||||
* Concatenate a set of points to a polygon
|
||||
*
|
||||
* @param component
|
||||
* component id of the polygon
|
||||
* @param direction
|
||||
* direction of the ring
|
||||
* @param points
|
||||
* list of points to concatenate
|
||||
* @param pointOffset
|
||||
* index of the first point
|
||||
* @param edges
|
||||
* Array of edges to write the result to
|
||||
* @param edgeOffset
|
||||
* index of the first edge in the result
|
||||
* @param length
|
||||
* number of points to use
|
||||
* @return the edges creates
|
||||
*/
|
||||
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
|
||||
int length) {
|
||||
assert edges.length >= length+edgeOffset;
|
||||
assert points.length >= length+pointOffset;
|
||||
edges[edgeOffset] = new Edge(points[pointOffset], null);
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (direction) {
|
||||
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
|
||||
edges[edgeOffset + i].component = component;
|
||||
} else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
|
||||
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
|
||||
edges[edgeOffset + i - 1].component = component;
|
||||
} else {
|
||||
throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (direction) {
|
||||
edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
|
||||
edges[edgeOffset].component = component;
|
||||
} else {
|
||||
edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
|
||||
edges[edgeOffset + length - 1].component = component;
|
||||
}
|
||||
|
||||
return edges;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
|
||||
*/
|
||||
private static void translate(Coordinate[] points) {
|
||||
for (Coordinate c : points) {
|
||||
if (c.x < 0) {
|
||||
c.x += 2*DATELINE;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -362,150 +362,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
}
|
||||
}
|
||||
|
||||
private static final int top(Coordinate[] points, int offset, int length) {
|
||||
int top = 0; // we start at 1 here since top points to 0
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (points[offset + i].y < points[offset + top].y) {
|
||||
top = i;
|
||||
} else if (points[offset + i].y == points[offset + top].y) {
|
||||
if (points[offset + i].x < points[offset + top].x) {
|
||||
top = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
return top;
|
||||
}
|
||||
|
||||
private static final double[] range(Coordinate[] points, int offset, int length) {
|
||||
double minX = points[0].x;
|
||||
double maxX = points[0].x;
|
||||
double minY = points[0].y;
|
||||
double maxY = points[0].y;
|
||||
// compute the bounding coordinates (@todo: cleanup brute force)
|
||||
for (int i = 1; i < length; ++i) {
|
||||
if (points[offset + i].x < minX) {
|
||||
minX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].x > maxX) {
|
||||
maxX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].y < minY) {
|
||||
minY = points[offset + i].y;
|
||||
}
|
||||
if (points[offset + i].y > maxY) {
|
||||
maxY = points[offset + i].y;
|
||||
}
|
||||
}
|
||||
return new double[] {minX, maxX, minY, maxY};
|
||||
}
|
||||
|
||||
/**
|
||||
* Concatenate a set of points to a polygon
|
||||
*
|
||||
* @param component
|
||||
* component id of the polygon
|
||||
* @param direction
|
||||
* direction of the ring
|
||||
* @param points
|
||||
* list of points to concatenate
|
||||
* @param pointOffset
|
||||
* index of the first point
|
||||
* @param edges
|
||||
* Array of edges to write the result to
|
||||
* @param edgeOffset
|
||||
* index of the first edge in the result
|
||||
* @param length
|
||||
* number of points to use
|
||||
* @return the edges creates
|
||||
*/
|
||||
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
|
||||
int length) {
|
||||
assert edges.length >= length+edgeOffset;
|
||||
assert points.length >= length+pointOffset;
|
||||
edges[edgeOffset] = new Edge(points[pointOffset], null);
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (direction) {
|
||||
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
|
||||
edges[edgeOffset + i].component = component;
|
||||
} else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
|
||||
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
|
||||
edges[edgeOffset + i - 1].component = component;
|
||||
} else {
|
||||
throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (direction) {
|
||||
edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
|
||||
edges[edgeOffset].component = component;
|
||||
} else {
|
||||
edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
|
||||
edges[edgeOffset + length - 1].component = component;
|
||||
}
|
||||
|
||||
return edges;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a connected list of a list of coordinates
|
||||
*
|
||||
* @param points
|
||||
* array of point
|
||||
* @param offset
|
||||
* index of the first point
|
||||
* @param length
|
||||
* number of points
|
||||
* @return Array of edges
|
||||
*/
|
||||
protected static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
|
||||
Coordinate[] points, int offset, Edge[] edges, int toffset, int length) {
|
||||
// calculate the direction of the points:
|
||||
// find the point a the top of the set and check its
|
||||
// neighbors orientation. So direction is equivalent
|
||||
// to clockwise/counterclockwise
|
||||
final int top = top(points, offset, length);
|
||||
final int prev = (offset + ((top + length - 1) % length));
|
||||
final int next = (offset + ((top + 1) % length));
|
||||
boolean orientation = points[offset + prev].x > points[offset + next].x;
|
||||
|
||||
// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
|
||||
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
|
||||
// thus if orientation is computed as cw, the logic will translate points across dateline
|
||||
// and convert to a right handed system
|
||||
|
||||
// compute the bounding box and calculate range
|
||||
double[] range = range(points, offset, length);
|
||||
final double rng = range[1] - range[0];
|
||||
// translate the points if the following is true
|
||||
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
|
||||
// (translation would result in a collapsed poly)
|
||||
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
|
||||
boolean incorrectOrientation = component == 0 && handedness != orientation;
|
||||
if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (shell.translated && component != 0)) {
|
||||
translate(points);
|
||||
// flip the translation bit if the shell is being translated
|
||||
if (component == 0) {
|
||||
shell.translated = true;
|
||||
}
|
||||
// correct the orientation post translation (ccw for shell, cw for holes)
|
||||
if (component == 0 || (component != 0 && handedness == orientation)) {
|
||||
orientation = !orientation;
|
||||
}
|
||||
}
|
||||
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
|
||||
*/
|
||||
protected static void translate(Coordinate[] points) {
|
||||
for (Coordinate c : points) {
|
||||
if (c.x < 0) {
|
||||
c.x += 2*DATELINE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the intersection of this line segment to the given position
|
||||
*
|
||||
@ -517,7 +373,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
return intersect = position(coordinate, next.coordinate, position);
|
||||
}
|
||||
|
||||
public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
|
||||
protected static Coordinate position(Coordinate p1, Coordinate p2, double position) {
|
||||
if (position == 0) {
|
||||
return p1;
|
||||
} else if (position == 1) {
|
||||
@ -542,7 +398,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
public int compare(Edge o1, Edge o2) {
|
||||
return Double.compare(o1.intersect.y, o2.intersect.y);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static enum Orientation {
|
||||
|
@ -19,8 +19,6 @@
|
||||
|
||||
package org.elasticsearch.common.lease;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/** Utility methods to work with {@link Releasable}s. */
|
||||
|
@ -123,7 +123,7 @@ public abstract class ExtensionPoint {
|
||||
public static final class SelectedType<T> extends ClassMap<T> {
|
||||
|
||||
public SelectedType(String name, Class<T> extensionClass) {
|
||||
super(name, extensionClass, Collections.EMPTY_SET);
|
||||
super(name, extensionClass, Collections.emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -131,7 +131,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
|
||||
if (metaData != null) {
|
||||
ShardPath shardPath = null;
|
||||
try {
|
||||
IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
|
||||
IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
|
||||
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
|
||||
if (shardPath == null) {
|
||||
throw new IllegalStateException(shardId + " no shard path found");
|
||||
|
@ -55,7 +55,7 @@ public final class AnalysisRegistry implements Closeable {
|
||||
private final Environment environemnt;
|
||||
|
||||
public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
|
||||
this(hunspellService, environment, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
|
||||
this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
|
||||
}
|
||||
|
||||
public AnalysisRegistry(HunspellService hunspellService, Environment environment,
|
||||
|
@ -360,7 +360,7 @@ public abstract class FieldMapper extends Mapper {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
String mergedType = mergeWith.getClass().getSimpleName();
|
||||
if (mergeWith instanceof FieldMapper) {
|
||||
@ -614,7 +614,7 @@ public abstract class FieldMapper extends Mapper {
|
||||
}
|
||||
|
||||
// No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
|
||||
|
||||
List<FieldMapper> newFieldMappers = null;
|
||||
|
@ -154,5 +154,5 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
|
||||
/** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
|
||||
public abstract String name();
|
||||
|
||||
public abstract void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException;
|
||||
public abstract void merge(Mapper mergeWith, MergeResult mergeResult);
|
||||
}
|
||||
|
@ -253,7 +253,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
||||
// simulate first
|
||||
MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
|
||||
if (result.hasConflicts()) {
|
||||
throw new MergeMappingException(result.buildConflicts());
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}");
|
||||
}
|
||||
// then apply for real
|
||||
result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
|
||||
|
@ -1,62 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public final class MergeMappingException extends MapperException {
|
||||
|
||||
private final String[] failures;
|
||||
|
||||
public MergeMappingException(String[] failures) {
|
||||
super("Merge failed with failures {" + Arrays.toString(failures) + "}");
|
||||
Objects.requireNonNull(failures, "failures must be non-null");
|
||||
this.failures = failures;
|
||||
}
|
||||
|
||||
public MergeMappingException(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
failures = in.readStringArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(failures);
|
||||
}
|
||||
|
||||
public String[] failures() {
|
||||
return failures;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RestStatus status() {
|
||||
return RestStatus.BAD_REQUEST;
|
||||
}
|
||||
}
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
/**
|
||||
@ -159,16 +160,22 @@ public class SourceToParse {
return this.ttl;
}

public SourceToParse ttl(TimeValue ttl) {
if (ttl == null) {
this.ttl = -1;
return this;
}
this.ttl = ttl.millis();
return this;
}

public SourceToParse ttl(long ttl) {
this.ttl = ttl;
return this;
}

public static enum Origin {

public enum Origin {
PRIMARY,
REPLICA

}

}
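
A tiny stand-alone sketch, with a hypothetical TtlSketch class in place of SourceToParse, of the null-safe setter added above: a null TimeValue is stored as the sentinel -1 (meaning no ttl), while a concrete value is stored as milliseconds, and the setter stays fluent.

public class TtlSketch {
    private long ttl = -1;

    TtlSketch ttl(Long millis) {                 // stands in for ttl(TimeValue)
        this.ttl = (millis == null) ? -1 : millis;
        return this;                             // keep the fluent style of SourceToParse
    }

    public static void main(String[] args) {
        System.out.println(new TtlSketch().ttl(null).ttl);        // -1
        System.out.println(new TtlSketch().ttl(7_200_000L).ttl);  // two hours in millis
    }
}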
|
||||
|
@ -605,7 +605,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
|
||||
if (!mergeResult.simulate()) {
|
||||
|
@ -245,7 +245,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
|
@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
|
||||
@ -360,7 +359,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
|
@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
|
||||
@ -191,7 +190,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
|
@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
@ -389,7 +388,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
|
@ -39,7 +39,6 @@ import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
@ -298,7 +297,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
|
@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
|
||||
@ -472,7 +471,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
|
@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -310,7 +309,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
}

@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -332,7 +331,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
// do nothing here, no merging, but also no exception
}
}

@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -280,7 +279,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
if (!mergeResult.simulate()) {
if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {

@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -372,7 +371,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
super.merge(mergeWith, mergeResult);
ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
if (Objects.equals(parentType, fieldMergeWith.parentType) == false) {

@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -250,7 +249,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
// do nothing here, no merging, but also no exception
}
}

@ -44,7 +44,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -442,7 +441,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
if (mergeResult.simulate()) {
if (this.enabled != sourceMergeWith.enabled) {

@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -259,7 +258,7 @@ public class TTLFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) {

@ -33,7 +33,6 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
@ -380,7 +379,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
super.merge(mergeWith, mergeResult);
if (!mergeResult.simulate()) {

@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -226,7 +225,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
// do nothing here, no merging, but also no exception
}
}

@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -226,7 +225,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
// do nothing here, no merging, but also no exception
}
}

@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -167,7 +166,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
}

@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
public void merge(Mapper mergeWith, MergeResult mergeResult) {
// nothing to do
}
}

@ -464,7 +464,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}

@Override
public void merge(final Mapper mergeWith, final MergeResult mergeResult) throws MergeMappingException {
public void merge(final Mapper mergeWith, final MergeResult mergeResult) {
if (!(mergeWith instanceof ObjectMapper)) {
mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
return;

@ -166,7 +166,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
current = createWriter(checkpoint.generation + 1);
this.lastCommittedTranslogFileGeneration = translogGeneration.translogFileGeneration;
} else {
this.recoveredTranslogs = Collections.EMPTY_LIST;
this.recoveredTranslogs = Collections.emptyList();
IOUtils.rm(location);
logger.debug("wipe translog location - creating new translog");
Files.createDirectories(location);
@ -582,7 +582,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* and updated with any future translog.
*/
public static final class View implements Closeable {
public static final Translog.View EMPTY_VIEW = new View(Collections.EMPTY_LIST, null);
public static final Translog.View EMPTY_VIEW = new View(Collections.emptyList(), null);

boolean closed;
// last in this list is always FsTranslog.current

@ -264,7 +264,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
}
final String indexName = indexMetaData.getIndex();
final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(indexName, indexExpression, clusterService.state());
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST, indexNameMatcher);
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList(), indexNameMatcher);
Index index = new Index(indexMetaData.getIndex());
if (indices.containsKey(index.name())) {
throw new IndexAlreadyExistsException(index);
@ -461,7 +461,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i

/**
* This method deletes the shard contents on disk for the given shard ID. This method will fail if the shard deleting
* is prevented by {@link #canDeleteShardContent(org.elasticsearch.index.shard.ShardId, org.elasticsearch.cluster.metadata.IndexMetaData)}
* is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)}
* of if the shards lock can not be acquired.
*
* On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove the index folder as well.
@ -529,18 +529,10 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* </ul>
*
* @param shardId the shard to delete.
* @param metaData the shards index metadata. This is required to access the indexes settings etc.
* @param indexSettings the shards's relevant {@link IndexSettings}. This is required to access the indexes settings etc.
*/
public boolean canDeleteShardContent(ShardId shardId, IndexMetaData metaData) {
// we need the metadata here since we have to build the complete settings
// to decide where the shard content lives. In the future we might even need more info here ie. for shadow replicas
// The plan was to make it harder to miss-use and ask for metadata instead of simple settings
assert shardId.getIndex().equals(metaData.getIndex());
final IndexSettings indexSettings = buildIndexSettings(metaData);
return canDeleteShardContent(shardId, indexSettings);
}

private boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
assert shardId.getIndex().equals(indexSettings.getIndex().name());
final IndexService indexService = this.indices.get(shardId.getIndex());
if (indexSettings.isOnSharedFilesystem() == false) {
if (indexService != null && nodeEnv.hasNodeFile()) {
@ -562,7 +554,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
// play safe here and make sure that we take node level settings into account.
// we might run on nodes where we use shard FS and then in the future don't delete
// actual content.
return new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
return new IndexSettings(metaData, settings, Collections.emptyList());
}

/**

@ -150,18 +150,23 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
assert Thread.holdsLock(this);
super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
// We can't use ShardCoreKeyMap here because its core closed
// listener is called before the listener of the cache which
// triggers this eviction. So instead we use use stats2 that
// we only evict when nothing is cached anymore on the segment
// instead of relying on close listeners
final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
final Stats shardStats = statsAndCount.stats;
shardStats.cacheSize -= numEntries;
shardStats.ramBytesUsed -= sumRamBytesUsed;
statsAndCount.count -= numEntries;
if (statsAndCount.count == 0) {
stats2.remove(readerCoreKey);
// onDocIdSetEviction might sometimes be called with a number
// of entries equal to zero if the cache for the given segment
// was already empty when the close listener was called
if (numEntries > 0) {
// We can't use ShardCoreKeyMap here because its core closed
// listener is called before the listener of the cache which
// triggers this eviction. So instead we use use stats2 that
// we only evict when nothing is cached anymore on the segment
// instead of relying on close listeners
final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
final Stats shardStats = statsAndCount.stats;
shardStats.cacheSize -= numEntries;
shardStats.ramBytesUsed -= sumRamBytesUsed;
statsAndCount.count -= numEntries;
if (statsAndCount.count == 0) {
stats2.remove(readerCoreKey);
}
}
}

@ -245,7 +245,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} else {
final IndexMetaData metaData = previousState.metaData().index(index);
assert metaData != null;
indexSettings = new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
}
try {
@ -349,7 +349,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// we only create / update here
continue;
}
List<String> typesToRefresh = new ArrayList<>();
boolean requireRefresh = false;
String index = indexMetaData.getIndex();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
@ -358,31 +358,17 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
try {
MapperService mapperService = indexService.mapperService();
// first, go over and update the _default_ mapping (if exists)
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
boolean requireRefresh = processMapping(index, mapperService, MapperService.DEFAULT_MAPPING, indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
if (requireRefresh) {
typesToRefresh.add(MapperService.DEFAULT_MAPPING);
}
}

// go over and add the relevant mappings (or update them)
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMd = cursor.value;
String mappingType = mappingMd.type();
CompressedXContent mappingSource = mappingMd.source();
if (mappingType.equals(MapperService.DEFAULT_MAPPING)) { // we processed _default_ first
continue;
}
boolean requireRefresh = processMapping(index, mapperService, mappingType, mappingSource);
if (requireRefresh) {
typesToRefresh.add(mappingType);
}
requireRefresh |= processMapping(index, mapperService, mappingType, mappingSource);
}
if (!typesToRefresh.isEmpty() && sendRefreshMapping) {
if (requireRefresh && sendRefreshMapping) {
nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.getIndexUUID(),
typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId())
event.state().nodes().localNodeId())
);
}
} catch (Throwable t) {
@ -398,26 +384,21 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}

private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedXContent mappingSource) throws Throwable {
if (!seenMappings.containsKey(new Tuple<>(index, mappingType))) {
seenMappings.put(new Tuple<>(index, mappingType), true);
}

// refresh mapping can happen for 2 reasons. The first is less urgent, and happens when the mapping on this
// node is ahead of what there is in the cluster state (yet an update-mapping has been sent to it already,
// it just hasn't been processed yet and published). Eventually, the mappings will converge, and the refresh
// mapping sent is more of a safe keeping (assuming the update mapping failed to reach the master, ...)
// the second case is where the parsing/merging of the mapping from the metadata doesn't result in the same
// refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
// mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
// merge version of it, which it does when refreshing the mappings), and warn log it.
boolean requiresRefresh = false;
try {
if (!mapperService.hasMapping(mappingType)) {
DocumentMapper existingMapper = mapperService.documentMapper(mappingType);

if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) {
String op = existingMapper == null ? "adding" : "updating";
if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
logger.debug("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
logger.debug("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string());
} else if (logger.isTraceEnabled()) {
logger.trace("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
logger.trace("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string());
} else {
logger.debug("[{}] adding mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, mappingType);
logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, op, mappingType);
}
// we don't apply default, since it has been applied when the mappings were parsed initially
mapperService.merge(mappingType, mappingSource, false, true);
@ -425,24 +406,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
requiresRefresh = true;
}
} else {
DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
if (!mappingSource.equals(existingMapper.mappingSource())) {
// mapping changed, update it
if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
logger.debug("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
} else if (logger.isTraceEnabled()) {
logger.trace("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
} else {
logger.debug("[{}] updating mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, mappingType);
}
// we don't apply default, since it has been applied when the mappings were parsed initially
mapperService.merge(mappingType, mappingSource, false, true);
if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
requiresRefresh = true;
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
}
}
}
} catch (Throwable e) {
logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource);
@ -781,27 +744,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} catch (Throwable e) {
logger.warn("failed to clean index ({})", e, reason);
}
clearSeenMappings(index);

}

private void clearSeenMappings(String index) {
// clear seen mappings as well
for (Tuple<String, String> tuple : seenMappings.keySet()) {
if (tuple.v1().equals(index)) {
seenMappings.remove(tuple);
}
}
}

private void deleteIndex(String index, String reason) {
try {
indicesService.deleteIndex(index, reason);
} catch (Throwable e) {
logger.warn("failed to delete index ({})", e, reason);
}
// clear seen mappings as well
clearSeenMappings(index);

}

@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
@ -43,6 +44,7 @@ import org.elasticsearch.transport.*;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
@ -97,11 +99,12 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
}

for (IndexRoutingTable indexRoutingTable : event.state().routingTable()) {
IndexSettings indexSettings = new IndexSettings(event.state().getMetaData().index(indexRoutingTable.index()), settings, Collections.emptyList());
// Note, closed indices will not have any routing information, so won't be deleted
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
if (shardCanBeDeleted(event.state(), indexShardRoutingTable)) {
ShardId shardId = indexShardRoutingTable.shardId();
if (indicesService.canDeleteShardContent(shardId, event.state().getMetaData().index(shardId.getIndex()))) {
if (indicesService.canDeleteShardContent(shardId, indexSettings)) {
deleteShardIfExistElseWhere(event.state(), indexShardRoutingTable);
}
}

@ -171,7 +171,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
if (!storeType.contains("fs")) {
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
}
final IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
final IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(metaData, settings, Collections.emptyList());
final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
if (shardPath == null) {
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);

@ -729,7 +729,7 @@ public class PercolateContext extends SearchContext {

@Override
public Set<String> getHeaders() {
return Collections.EMPTY_SET;
return Collections.emptySet();
}

@Override

@ -70,12 +70,6 @@ public abstract class Plugin {
return Settings.Builder.EMPTY_SETTINGS;
}

/**
* Called once the given {@link IndexService} is fully constructed but not yet published.
* This is used to initialize plugin services that require acess to index level resources
*/
public void onIndexService(IndexService indexService) {}

/**
* Called before a new index is created on a node. The given module can be used to regsiter index-leve
* extensions.

@ -247,14 +247,6 @@ public class PluginsService extends AbstractComponent {
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
plugin.v2().onIndexModule(indexModule);
}
indexModule.addIndexEventListener(new IndexEventListener() {
@Override
public void afterIndexCreated(IndexService indexService) {
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
plugin.v2().onIndexService(indexService);
}
}
});
}
/**
* Get information about plugins (jvm and site plugins).

@ -294,7 +294,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
if (readOnly()) {
throw new RepositoryException(this.repositoryName, "cannot delete snapshot from a readonly repository");
}
List<String> indices = Collections.EMPTY_LIST;
List<String> indices = Collections.emptyList();
Snapshot snapshot = null;
try {
snapshot = readSnapshot(snapshotId);

@ -74,7 +74,7 @@ public class RestIndexAction extends BaseRestHandler {
indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
indexRequest.timestamp(request.param("timestamp"));
if (request.hasParam("ttl")) {
indexRequest.ttl(request.paramAsTime("ttl", null).millis());
indexRequest.ttl(request.param("ttl"));
}
indexRequest.source(request.content());
indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));

@ -105,7 +105,7 @@ public class RestUpdateAction extends BaseRestHandler {
upsertRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
upsertRequest.timestamp(request.param("timestamp"));
if (request.hasParam("ttl")) {
upsertRequest.ttl(request.paramAsTime("ttl", null).millis());
upsertRequest.ttl(request.param("ttl"));
}
upsertRequest.version(RestActions.parseVersion(request));
upsertRequest.versionType(VersionType.fromString(request.param("version_type"), upsertRequest.versionType()));
@ -116,7 +116,7 @@ public class RestUpdateAction extends BaseRestHandler {
doc.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
doc.timestamp(request.param("timestamp"));
if (request.hasParam("ttl")) {
doc.ttl(request.paramAsTime("ttl", null).millis());
doc.ttl(request.param("ttl"));
}
doc.version(RestActions.parseVersion(request));
doc.versionType(VersionType.fromString(request.param("version_type"), doc.versionType()));

@ -119,14 +119,12 @@ public class Template extends Script {
return template;
}

@SuppressWarnings("unchecked")
public static Script parse(Map<String, Object> config, boolean removeMatchedEntries, ParseFieldMatcher parseFieldMatcher) {
return new TemplateParser(Collections.EMPTY_MAP, MustacheScriptEngineService.NAME).parse(config, removeMatchedEntries, parseFieldMatcher);
return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(config, removeMatchedEntries, parseFieldMatcher);
}

@SuppressWarnings("unchecked")
public static Template parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
return new TemplateParser(Collections.EMPTY_MAP, MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher);
return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher);
}

@Deprecated

@ -31,7 +31,7 @@ import java.util.Map;
*/
public class SignificanceHeuristicStreams {

private static Map<String, Stream> STREAMS = Collections.EMPTY_MAP;
private static Map<String, Stream> STREAMS = Collections.emptyMap();

static {
HashMap<String, Stream> map = new HashMap<>();

@ -78,7 +78,7 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg
}
}
}
return buildAggregation(Collections.EMPTY_LIST, metaData());
return buildAggregation(Collections.emptyList(), metaData());
}

/**
@ -123,4 +123,4 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg
gapPolicy.writeTo(out);
}

}
}

@ -90,7 +90,7 @@ public class MaxBucketPipelineAggregator extends BucketMetricsPipelineAggregator
@Override
protected InternalAggregation buildAggregation(List<PipelineAggregator> pipelineAggregators, Map<String, Object> metadata) {
String[] keys = maxBucketKeys.toArray(new String[maxBucketKeys.size()]);
return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.EMPTY_LIST, metaData());
return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.emptyList(), metaData());
}

public static class Factory extends PipelineAggregatorFactory {

@ -91,7 +91,7 @@ public class MinBucketPipelineAggregator extends BucketMetricsPipelineAggregator
protected InternalAggregation buildAggregation(java.util.List<PipelineAggregator> pipelineAggregators,
java.util.Map<String, Object> metadata) {
String[] keys = minBucketKeys.toArray(new String[minBucketKeys.size()]);
return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.EMPTY_LIST, metaData());
return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.emptyList(), metaData());
};

public static class Factory extends PipelineAggregatorFactory {

@ -32,7 +32,7 @@ import java.util.Map;
*/
public class MovAvgModelStreams {

private static Map<String, Stream> STREAMS = Collections.EMPTY_MAP;
private static Map<String, Stream> STREAMS = Collections.emptyMap();

static {
HashMap<String, Stream> map = new HashMap<>();

@ -35,7 +35,7 @@ public final class Suggesters extends ExtensionPoint.ClassMap<Suggester> {
private final Map<String, Suggester> parsers;

public Suggesters() {
this(Collections.EMPTY_MAP);
this(Collections.emptyMap());
}

public Suggesters(Map<String, Suggester> suggesters) {

@ -38,10 +38,10 @@ public class CompletionSuggestionContext extends SuggestionSearchContext.Suggest
private CompletionFieldMapper.CompletionFieldType fieldType;
private CompletionSuggestionBuilder.FuzzyOptionsBuilder fuzzyOptionsBuilder;
private CompletionSuggestionBuilder.RegexOptionsBuilder regexOptionsBuilder;
private Map<String, List<ContextMapping.QueryContext>> queryContexts = Collections.EMPTY_MAP;
private Map<String, List<ContextMapping.QueryContext>> queryContexts = Collections.emptyMap();
private final MapperService mapperService;
private final IndexFieldDataService indexFieldDataService;
private Set<String> payloadFields = Collections.EMPTY_SET;
private Set<String> payloadFields = Collections.emptySet();

CompletionSuggestionContext(Suggester suggester, MapperService mapperService, IndexFieldDataService indexFieldDataService) {
super(suggester);

@ -29,8 +29,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static java.util.Collections.*;

/**
* Represent information about snapshot
*/
@ -93,7 +91,7 @@ public class Snapshot implements Comparable<Snapshot>, ToXContent, FromXContentB
* Special constructor for the prototype object
*/
private Snapshot() {
this("", (List<String>) EMPTY_LIST, 0);
this("", Collections.emptyList(), 0);
}

private static SnapshotState snapshotState(String reason, List<SnapshotShardFailure> shardFailures) {

@ -154,7 +154,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem

@Override
public Map<String, BoundTransportAddress> profileBoundAddresses() {
return Collections.EMPTY_MAP;
return Collections.emptyMap();
}

@Override

@ -51,7 +51,6 @@ import org.elasticsearch.index.AlreadyExpiredException;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.IndexFailedEngineException;
import org.elasticsearch.index.engine.RecoveryEngineException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexShardState;
@ -275,11 +274,6 @@ public class ExceptionSerializationTests extends ESTestCase {
assertEquals(-3, alreadyExpiredException.now());
}

public void testMergeMappingException() throws IOException {
MergeMappingException ex = serialize(new MergeMappingException(new String[]{"one", "two"}));
assertArrayEquals(ex.failures(), new String[]{"one", "two"});
}

public void testActionNotFoundTransportException() throws IOException {
ActionNotFoundTransportException ex = serialize(new ActionNotFoundTransportException("AACCCTION"));
assertEquals("AACCCTION", ex.action());
@ -725,7 +719,6 @@ public class ExceptionSerializationTests extends ESTestCase {
ids.put(84, org.elasticsearch.transport.NodeDisconnectedException.class);
ids.put(85, org.elasticsearch.index.AlreadyExpiredException.class);
ids.put(86, org.elasticsearch.search.aggregations.AggregationExecutionException.class);
ids.put(87, org.elasticsearch.index.mapper.MergeMappingException.class);
ids.put(88, org.elasticsearch.indices.InvalidIndexTemplateException.class);
ids.put(89, org.elasticsearch.percolator.PercolateException.class);
ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class);

@ -20,6 +20,8 @@
package org.elasticsearch.action.admin.indices.template.put;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.cluster.metadata.AliasValidator;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
@ -28,13 +30,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.InvalidIndexTemplateException;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.*;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;

public class MetaDataIndexTemplateServiceTests extends ESTestCase {
@ -68,6 +67,17 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
}

public void testIndexTemplateWithAliasNameEqualToTemplatePattern() {
PutRequest request = new PutRequest("api", "foobar_template");
request.template("foobar");
request.aliases(Collections.singleton(new Alias("foobar")));

List<Throwable> errors = putTemplate(request);
assertThat(errors.size(), equalTo(1));
assertThat(errors.get(0), instanceOf(IllegalArgumentException.class));
assertThat(errors.get(0).getMessage(), equalTo("Alias [foobar] cannot be the same as the template pattern [foobar]"));
}

private static List<Throwable> putTemplate(PutRequest request) {
MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(
Settings.EMPTY,
@ -79,7 +89,7 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
new HashSet<>(),
null,
null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, new AliasValidator(Settings.EMPTY));

final List<Throwable> throwables = new ArrayList<>();
service.putTemplate(request, new MetaDataIndexTemplateService.PutListener() {

@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.index;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.test.ESTestCase;

@ -25,10 +27,7 @@ import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.*;

/**
*/
@ -67,4 +66,43 @@ public class IndexRequestTests extends ESTestCase {
request.version(randomIntBetween(0, Integer.MAX_VALUE));
assertThat(request.validate().validationErrors(), not(empty()));
}

public void testSetTTLAsTimeValue() {
IndexRequest indexRequest = new IndexRequest();
TimeValue ttl = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl");
indexRequest.ttl(ttl);
assertThat(indexRequest.ttl(), equalTo(ttl));
}

public void testSetTTLAsString() {
IndexRequest indexRequest = new IndexRequest();
String ttlAsString = randomTimeValue();
TimeValue ttl = TimeValue.parseTimeValue(ttlAsString, null, "ttl");
indexRequest.ttl(ttlAsString);
assertThat(indexRequest.ttl(), equalTo(ttl));
}

public void testSetTTLAsLong() {
IndexRequest indexRequest = new IndexRequest();
String ttlAsString = randomTimeValue();
TimeValue ttl = TimeValue.parseTimeValue(ttlAsString, null, "ttl");
indexRequest.ttl(ttl.millis());
assertThat(indexRequest.ttl(), equalTo(ttl));
}

public void testValidateTTL() {
IndexRequest indexRequest = new IndexRequest("index", "type");
if (randomBoolean()) {
indexRequest.ttl(randomIntBetween(Integer.MIN_VALUE, -1));
} else {
if (randomBoolean()) {
indexRequest.ttl(new TimeValue(randomIntBetween(Integer.MIN_VALUE, -1)));
} else {
indexRequest.ttl(randomIntBetween(Integer.MIN_VALUE, -1) + "ms");
}
}
ActionRequestValidationException validate = indexRequest.validate();
assertThat(validate, notNullValue());
assertThat(validate.getMessage(), containsString("ttl must not be negative"));
}
}

@ -23,6 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.get.GetResult;
@ -33,11 +34,7 @@ import org.elasticsearch.test.ESTestCase;
import java.util.Map;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.*;

public class UpdateRequestTests extends ESTestCase {
public void testUpdateRequest() throws Exception {
@ -127,7 +124,7 @@ public class UpdateRequestTests extends ESTestCase {

// Related to issue 3256
public void testUpdateRequestWithTTL() throws Exception {
long providedTTLValue = randomIntBetween(500, 1000);
TimeValue providedTTLValue = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl");
Settings settings = settings(Version.CURRENT).build();

UpdateHelper updateHelper = new UpdateHelper(settings, null);

@ -280,46 +280,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
}

public void testHandlingOfUnsupportedDanglingIndexes() throws Exception {
setupCluster();
Collections.shuffle(unsupportedIndexes, getRandom());
for (String index : unsupportedIndexes) {
assertUnsupportedIndexHandling(index);
}
}

/**
* Waits for the index to show up in the cluster state in closed state
*/
void ensureClosed(final String index) throws InterruptedException {
assertTrue(awaitBusy(() -> {
ClusterState state = client().admin().cluster().prepareState().get().getState();
return state.metaData().hasIndex(index) && state.metaData().index(index).getState() == IndexMetaData.State.CLOSE;
}
)
);
}

/**
* Checks that the given index cannot be opened due to incompatible version
*/
void assertUnsupportedIndexHandling(String index) throws Exception {
long startTime = System.currentTimeMillis();
logger.info("--> Testing old index " + index);
String indexName = loadIndex(index);
// force reloading dangling indices with a cluster state republish
client().admin().cluster().prepareReroute().get();
ensureClosed(indexName);
try {
client().admin().indices().prepareOpen(indexName).get();
fail("Shouldn't be able to open an old index");
} catch (IllegalStateException ex) {
assertThat(ex.getMessage(), containsString("was created before v2.0.0.beta1 and wasn't upgraded"));
}
unloadIndex(indexName);
logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
}

void assertOldIndexWorks(String index) throws Exception {
Version version = extractVersion(index);
String indexName = loadIndex(index);

@ -189,6 +189,6 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp

@Override
public Map<String, BoundTransportAddress> profileBoundAddresses() {
return Collections.EMPTY_MAP;
return Collections.emptyMap();
}
}

@ -63,7 +63,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
transport = new FailAndRetryMockTransport<TestResponse>(getRandom()) {
@Override
public List<String> getLocalAddresses() {
return Collections.EMPTY_LIST;
return Collections.emptyList();
}

@Override

@ -0,0 +1,44 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.cluster.metadata;

import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.equalTo;

public class MetaDataTests extends ESTestCase {

public void testIndexAndAliasWithSameName() {
IndexMetaData.Builder builder = IndexMetaData.builder("index")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.putAlias(AliasMetaData.builder("index").build());
try {
MetaData.builder().put(builder).build();
fail("expection should have been thrown");
} catch (IllegalStateException e) {
assertThat(e.getMessage(), equalTo("index and alias names need to be unique, but alias [index] and index [index] have the same name"));
}
}

}

Some files were not shown because too many files have changed in this diff.