From f8a414c681179f168c2eeb2723dfd449357e5643 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Wed, 25 Nov 2015 19:34:33 +0100
Subject: [PATCH 01/41] Remove Plugin.onIndexService.

---
 core/src/main/java/org/elasticsearch/plugins/Plugin.java  | 6 ------
 .../java/org/elasticsearch/plugins/PluginsService.java    | 8 --------
 2 files changed, 14 deletions(-)
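
Note: with this hook gone, a plugin that needs access to index-level
resources can register an IndexEventListener itself from
Plugin#onIndexModule, which is exactly what the glue code removed from
PluginsService below used to do on every plugin's behalf. A minimal
sketch of that replacement pattern, assuming a hypothetical MyPlugin
(the class name and the name()/description() bodies are illustrative
only; the hook and listener signatures are the ones shown in the diff):

    import org.elasticsearch.index.IndexModule;
    import org.elasticsearch.index.IndexService;
    import org.elasticsearch.index.shard.IndexEventListener;
    import org.elasticsearch.plugins.Plugin;

    public class MyPlugin extends Plugin {
        @Override
        public String name() {
            return "my-plugin";
        }

        @Override
        public String description() {
            return "initializes index-level services via an IndexEventListener";
        }

        @Override
        public void onIndexModule(IndexModule indexModule) {
            // replaces the removed onIndexService hook: react once the
            // IndexService is fully constructed but not yet published
            indexModule.addIndexEventListener(new IndexEventListener() {
                @Override
                public void afterIndexCreated(IndexService indexService) {
                    // initialize plugin services that require access to
                    // index-level resources
                }
            });
        }
    }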

diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java
index 7c98879953f..ce02de4fa83 100644
--- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java
+++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java
@@ -70,12 +70,6 @@ public abstract class Plugin {
         return Settings.Builder.EMPTY_SETTINGS;
     }
 
-    /**
-     * Called once the given {@link IndexService} is fully constructed but not yet published.
-     * This is used to initialize plugin services that require access to index-level resources
-     */
-    public void onIndexService(IndexService indexService) {}
-
     /**
      * Called before a new index is created on a node. The given module can be used to register index-level
      * extensions.
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java
index 4fddf7ad838..1db54458cef 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java
+++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java
@@ -247,14 +247,6 @@ public class PluginsService extends AbstractComponent {
         for (Tuple<PluginInfo, Plugin> plugin : plugins) {
             plugin.v2().onIndexModule(indexModule);
         }
-        indexModule.addIndexEventListener(new IndexEventListener() {
-            @Override
-            public void afterIndexCreated(IndexService indexService) {
-                for (Tuple<PluginInfo, Plugin> plugin : plugins) {
-                    plugin.v2().onIndexService(indexService);
-                }
-            }
-        });
     }
     /**
      * Get information about plugins (jvm and site plugins).

From d7f4dd0767db33ac331236629304b5e7f4168d96 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 25 Nov 2015 08:55:23 -0500
Subject: [PATCH 02/41] Use general cluster state batching mechanism for shard
 failures

This commit modifies the handling of shard failure cluster state updates
to use the general cluster state batching mechanism. An advantage of
this approach is that we now get correct per-listener notification of
failures.
---
 .../action/shard/ShardStateAction.java        | 80 +++++++++----------
 .../routing/allocation/AllocationService.java |  6 +-
 2 files changed, 42 insertions(+), 44 deletions(-)
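
Note: under the general batching mechanism, all tasks submitted against
the same executor instance are drained into a single
execute(currentState, tasks) call, and the returned BatchResult records
a per-task outcome so that each task's ClusterStateTaskListener can be
notified individually. A stripped-down sketch of that contract (the
NoopBatchedHandler name and the String task payload are illustrative;
the interfaces are the same ones this patch implements in
ShardFailedClusterStateHandler):

    import java.util.List;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.ClusterStateTaskExecutor;
    import org.elasticsearch.cluster.ClusterStateTaskListener;

    // no-op handler: accepts a batch of String tasks, leaves the cluster
    // state unchanged, and marks each task as a success so its listener
    // is notified correctly
    class NoopBatchedHandler implements ClusterStateTaskExecutor<String>, ClusterStateTaskListener {
        @Override
        public BatchResult<String> execute(ClusterState currentState, List<String> tasks) throws Exception {
            BatchResult.Builder<String> builder = BatchResult.builder();
            for (String task : tasks) {
                builder.success(task); // record a per-task outcome
            }
            return builder.build(currentState); // no state change
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            // invoked after the batched update has been applied
        }

        @Override
        public void onFailure(String source, Throwable t) {
            // invoked for each task whose update failed
        }
    }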

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 1b43a33627b..8a7340f2ae4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -20,9 +20,7 @@
 package org.elasticsearch.cluster.action.shard;
 
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.*;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingService;
@@ -46,7 +44,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CountDownLatch;
 
 import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
 
@@ -64,7 +61,6 @@ public class ShardStateAction extends AbstractComponent {
     private final RoutingService routingService;
 
     private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
-    private final BlockingQueue<ShardRoutingEntry> failedShardQueue = ConcurrentCollections.newBlockingQueue();
 
     @Inject
     public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
@@ -141,54 +137,52 @@ public class ShardStateAction extends AbstractComponent {
                 });
     }
 
+    private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
+
     private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
         logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
-        failedShardQueue.add(shardRoutingEntry);
-        clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
-                new ClusterStateUpdateTask(Priority.HIGH) {
+        clusterService.submitStateUpdateTask(
+                "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
+                shardRoutingEntry,
+                ClusterStateTaskConfig.build(Priority.HIGH),
+                shardFailedClusterStateHandler,
+                shardFailedClusterStateHandler);
+    }
 
-            @Override
-            public ClusterState execute(ClusterState currentState) {
-                if (shardRoutingEntry.processed) {
-                    return currentState;
+    class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
+        @Override
+        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
+            BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
+            ClusterState accumulator = ClusterState.builder(currentState).build();
+            for (ShardRoutingEntry task : tasks) {
+                task.processed = true;
+                try {
+                    RoutingAllocation.Result result = allocationService.applyFailedShard(
+                            currentState,
+                            new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
+                    builder.success(task);
+                    if (result.changed()) {
+                        accumulator = ClusterState.builder(accumulator).routingResult(result).build();
+                    }
+                } catch (Throwable t) {
+                    builder.failure(task, t);
                 }
-
-                List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
-                failedShardQueue.drainTo(shardRoutingEntries);
-
-                // nothing to process (a previous event has processed it already)
-                if (shardRoutingEntries.isEmpty()) {
-                    return currentState;
-                }
-
-                List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(shardRoutingEntries.size());
-
-                // mark all entries as processed
-                for (ShardRoutingEntry entry : shardRoutingEntries) {
-                    entry.processed = true;
-                    shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(entry.shardRouting, entry.message, entry.failure));
-                }
-
-                RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
-                if (!routingResult.changed()) {
-                    return currentState;
-                }
-                return ClusterState.builder(currentState).routingResult(routingResult).build();
             }
+            return builder.build(accumulator);
+        }
 
-            @Override
-            public void onFailure(String source, Throwable t) {
-                logger.error("unexpected failure during [{}]", t, source);
-            }
-
-            @Override
-            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+        @Override
+        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                 if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
                     logger.trace("unassigned shards after shard failures. scheduling a reroute.");
                     routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
                 }
-            }
-        });
+        }
+
+        @Override
+        public void onFailure(String source, Throwable t) {
+            logger.error("unexpected failure during [{}]", t, source);
+        }
     }
 
     private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index f819d6fde0a..e19d51981a6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -98,7 +98,11 @@ public class AllocationService extends AbstractComponent {
     }
 
     public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
-        return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
+        return applyFailedShard(clusterState, new FailedRerouteAllocation.FailedShard(failedShard, null, null));
+    }
+
+    public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, FailedRerouteAllocation.FailedShard failedShard) {
+        return applyFailedShards(clusterState, Collections.singletonList(failedShard));
     }
 
     /**

From 413688b0ca560915f823c9beaf71a4f4bb2e0817 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Tue, 1 Dec 2015 11:19:47 -0500
Subject: [PATCH 03/41] Apply shard failures in a single batch

This commit modifies the shard-failed cluster state handler to collect
the failed shards from every task in a batch and apply them through a
single call to AllocationService#applyFailedShards, rather than
computing an intermediate cluster state for each task.
---
 .../action/shard/ShardStateAction.java        | 26 +++++++++----------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 8a7340f2ae4..da014e15b6b 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -153,22 +153,22 @@ public class ShardStateAction extends AbstractComponent {
         @Override
         public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
             BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
-            ClusterState accumulator = ClusterState.builder(currentState).build();
+            List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
             for (ShardRoutingEntry task : tasks) {
                 task.processed = true;
-                try {
-                    RoutingAllocation.Result result = allocationService.applyFailedShard(
-                            currentState,
-                            new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
-                    builder.success(task);
-                    if (result.changed()) {
-                        accumulator = ClusterState.builder(accumulator).routingResult(result).build();
-                    }
-                } catch (Throwable t) {
-                    builder.failure(task, t);
-                }
+                shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
             }
-            return builder.build(accumulator);
+            ClusterState maybeUpdatedState = currentState;
+            try {
+                RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
+                if (result.changed()) {
+                    maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
+                }
+                builder.successes(tasks);
+            } catch (Throwable t) {
+                builder.failures(tasks, t);
+            }
+            return builder.build(maybeUpdatedState);
         }
 
         @Override

From 6e5c3017393462c73dc122b50d08a320fa6b740d Mon Sep 17 00:00:00 2001
From: Robert Muir <rmuir@apache.org>
Date: Tue, 1 Dec 2015 22:22:11 -0500
Subject: [PATCH 04/41] remove unused dependencies

---
 .../forbidden/third-party-signatures.txt      |  42 -----
 core/build.gradle                             |   3 -
 .../common/compress/CompressorFactory.java    |   2 -
 .../compress/lzf/LZFCompressedIndexInput.java |  80 --------
 .../lzf/LZFCompressedStreamInput.java         |  73 --------
 .../common/compress/lzf/LZFCompressor.java    | 100 ----------
 .../compress/lzf/CompressedStreamOutput.java  | 143 --------------
 .../lzf/CorruptedCompressorTests.java         |  70 -------
 .../lzf/LZFCompressedStreamOutput.java        |  63 -------
 .../lzf/LZFCompressedStreamTests.java         |  30 ---
 .../compress/lzf/LZFTestCompressor.java       |  34 ----
 .../common/compress/lzf/LZFXContentTests.java |  30 ---
 .../compress/SearchSourceCompressTests.java   |  99 ----------
 .../licenses/compress-lzf-1.0.2.jar.sha1      |   1 -
 distribution/licenses/compress-lzf-LICENSE    |   8 -
 distribution/licenses/compress-lzf-NOTICE     |  24 ---
 distribution/licenses/snakeyaml-1.15.jar.sha1 |   1 -
 distribution/licenses/snakeyaml-LICENSE.txt   | 176 ------------------
 distribution/licenses/snakeyaml-NOTICE.txt    |   1 -
 19 files changed, 980 deletions(-)
 delete mode 100644 core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java
 delete mode 100644 core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java
 delete mode 100644 core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java
 delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java
 delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java
 delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
 delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java
 delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java
 delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java
 delete mode 100644 core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
 delete mode 100644 distribution/licenses/compress-lzf-1.0.2.jar.sha1
 delete mode 100644 distribution/licenses/compress-lzf-LICENSE
 delete mode 100644 distribution/licenses/compress-lzf-NOTICE
 delete mode 100644 distribution/licenses/snakeyaml-1.15.jar.sha1
 delete mode 100644 distribution/licenses/snakeyaml-LICENSE.txt
 delete mode 100644 distribution/licenses/snakeyaml-NOTICE.txt

diff --git a/buildSrc/src/main/resources/forbidden/third-party-signatures.txt b/buildSrc/src/main/resources/forbidden/third-party-signatures.txt
index ac1ce33ac92..93e0eb043b5 100644
--- a/buildSrc/src/main/resources/forbidden/third-party-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/third-party-signatures.txt
@@ -14,48 +14,6 @@
 # either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
 
-@defaultMessage unsafe encoders/decoders have problems in the lzf compress library.  Use variants of encode/decode functions which take Encoder/Decoder.
-com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int)
-com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int)
-com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.impl.UnsafeChunkDecoder#<init>()
-com.ning.compress.lzf.parallel.CompressTask
-com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance()
-com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int)
-com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int)
-com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.util.ChunkDecoderFactory#optimalInstance()
-com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.File)
-com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.FileDescriptor)
-com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.lang.String)
-com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File)
-com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File, boolean)
-com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.FileDescriptor)
-com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String)
-com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String, boolean)
-com.ning.compress.lzf.LZFEncoder#encode(byte[])
-com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int)
-com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int)
-com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.LZFCompressingInputStream#<init>(java.io.InputStream)
-com.ning.compress.lzf.LZFDecoder#fastDecoder()
-com.ning.compress.lzf.LZFDecoder#decode(byte[])
-com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int)
-com.ning.compress.lzf.LZFDecoder#decode(byte[], byte[])
-com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int, byte[])
-com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream)
-com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, boolean)
-com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler, boolean)
-com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream)
-com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream, com.ning.compress.BufferRecycler)
-com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler)
-com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler, com.ning.compress.BufferRecycler)
-
 @defaultMessage Constructing a DateTime without a time zone is dangerous
 org.joda.time.DateTime#<init>()
 org.joda.time.DateTime#<init>(long)
diff --git a/core/build.gradle b/core/build.gradle
index 618e252e9a8..62eef5a5b41 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -62,12 +62,9 @@ dependencies {
   compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
   compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
   compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
-  compile "org.yaml:snakeyaml:1.15" // used by jackson yaml
 
   // network stack
   compile 'io.netty:netty:3.10.5.Final'
-  // compression of transport protocol
-  compile 'com.ning:compress-lzf:1.0.2'
   // percentiles aggregation
   compile 'com.tdunning:t-digest:3.0'
   // percentile ranks aggregation
diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
index 4af2e962d85..8598634ac4c 100644
--- a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
+++ b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
@@ -23,7 +23,6 @@ import org.apache.lucene.store.IndexInput;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.deflate.DeflateCompressor;
-import org.elasticsearch.common.compress.lzf.LZFCompressor;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -42,7 +41,6 @@ public class CompressorFactory {
 
     static {
         compressors = new Compressor[] {
-                new LZFCompressor(),
                 new DeflateCompressor()
         };
         defaultCompressor = new DeflateCompressor();
diff --git a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java b/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java
deleted file mode 100644
index 93bd583662b..00000000000
--- a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedIndexInput.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import com.ning.compress.lzf.ChunkDecoder;
-import com.ning.compress.lzf.LZFChunk;
-import org.apache.lucene.store.BufferedIndexInput;
-import org.apache.lucene.store.IndexInput;
-import org.elasticsearch.common.compress.CompressedIndexInput;
-import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-/**
- */
-@Deprecated
-public class LZFCompressedIndexInput extends CompressedIndexInput {
-
-    private final ChunkDecoder decoder;
-    // scratch area buffer
-    private byte[] inputBuffer;
-
-    public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
-        super(in);
-
-        this.decoder = decoder;
-        this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
-        this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
-        this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
-    }
-
-    @Override
-    protected void readHeader(IndexInput in) throws IOException {
-        byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
-        in.readBytes(header, 0, header.length, false);
-        if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
-            throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
-        }
-    }
-
-    @Override
-    protected int uncompress(IndexInput in, byte[] out) throws IOException {
-        return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
-    }
-
-    @Override
-    protected void doClose() throws IOException {
-        // nothing to do here...
-    }
-
-    @Override
-    public IndexInput clone() {
-        LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
-        cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
-        return cloned;
-    }
-
-    @Override
-    public IndexInput slice(String description, long offset, long length) throws IOException {
-        return BufferedIndexInput.wrap(description, this, offset, length);
-    }
-}
diff --git a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java b/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java
deleted file mode 100644
index baefcaa8928..00000000000
--- a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamInput.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import com.ning.compress.BufferRecycler;
-import com.ning.compress.lzf.ChunkDecoder;
-import com.ning.compress.lzf.LZFChunk;
-import org.elasticsearch.common.compress.CompressedStreamInput;
-import org.elasticsearch.common.io.stream.StreamInput;
-
-import java.io.IOException;
-
-/**
- */
-public class LZFCompressedStreamInput extends CompressedStreamInput {
-
-    private final BufferRecycler recycler;
-
-    private final ChunkDecoder decoder;
-
-    // scratch area buffer
-    private byte[] inputBuffer;
-
-    public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
-        super(in);
-        this.recycler = BufferRecycler.instance();
-        this.decoder = decoder;
-
-        this.uncompressed = recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
-        this.inputBuffer = recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
-    }
-
-    @Override
-    public void readHeader(StreamInput in) throws IOException {
-        // nothing to do here, each chunk has a header
-    }
-
-    @Override
-    public int uncompress(StreamInput in, byte[] out) throws IOException {
-        return decoder.decodeChunk(in, inputBuffer, out);
-    }
-
-    @Override
-    protected void doClose() throws IOException {
-        byte[] buf = inputBuffer;
-        if (buf != null) {
-            inputBuffer = null;
-            recycler.releaseInputBuffer(buf);
-        }
-        buf = uncompressed;
-        if (buf != null) {
-            uncompressed = null;
-            recycler.releaseDecodeBuffer(uncompressed);
-        }
-    }
-}
diff --git a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java b/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java
deleted file mode 100644
index bb7a642c987..00000000000
--- a/core/src/main/java/org/elasticsearch/common/compress/lzf/LZFCompressor.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import com.ning.compress.lzf.ChunkDecoder;
-import com.ning.compress.lzf.LZFChunk;
-import com.ning.compress.lzf.util.ChunkDecoderFactory;
-import org.apache.lucene.store.IndexInput;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.compress.CompressedIndexInput;
-import org.elasticsearch.common.compress.Compressor;
-import org.elasticsearch.common.compress.deflate.DeflateCompressor;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.Loggers;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-import java.io.IOException;
-
-/**
- * @deprecated Use {@link DeflateCompressor} instead
- */
-@Deprecated
-public class LZFCompressor implements Compressor {
-
-    static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};
-
-    private ChunkDecoder decoder;
-
-    public LZFCompressor() {
-        this.decoder = ChunkDecoderFactory.safeInstance();
-        Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName());
-    }
-
-    @Override
-    public boolean isCompressed(BytesReference bytes) {
-        return bytes.length() >= 3 &&
-                bytes.get(0) == LZFChunk.BYTE_Z &&
-                bytes.get(1) == LZFChunk.BYTE_V &&
-                (bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
-    }
-
-    @Override
-    public boolean isCompressed(ChannelBuffer buffer) {
-        int offset = buffer.readerIndex();
-        return buffer.readableBytes() >= 3 &&
-                buffer.getByte(offset) == LZFChunk.BYTE_Z &&
-                buffer.getByte(offset + 1) == LZFChunk.BYTE_V &&
-                (buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_COMPRESSED || buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
-    }
-
-    @Override
-    public boolean isCompressed(IndexInput in) throws IOException {
-        long currentPointer = in.getFilePointer();
-        // since we have some metdata before the first compressed header, we check on our specific header
-        if (in.length() - currentPointer < (LUCENE_HEADER.length)) {
-            return false;
-        }
-        for (int i = 0; i < LUCENE_HEADER.length; i++) {
-            if (in.readByte() != LUCENE_HEADER[i]) {
-                in.seek(currentPointer);
-                return false;
-            }
-        }
-        in.seek(currentPointer);
-        return true;
-    }
-
-    @Override
-    public StreamInput streamInput(StreamInput in) throws IOException {
-        return new LZFCompressedStreamInput(in, decoder);
-    }
-
-    @Override
-    public StreamOutput streamOutput(StreamOutput out) throws IOException {
-        throw new UnsupportedOperationException("LZF is only here for back compat, no write support");
-    }
-
-    @Override
-    public CompressedIndexInput indexInput(IndexInput in) throws IOException {
-        return new LZFCompressedIndexInput(in, decoder);
-    }
-}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java
deleted file mode 100644
index 3cf0bcd5cfd..00000000000
--- a/core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-
-/**
- */
-public abstract class CompressedStreamOutput extends StreamOutput {
-
-    private final StreamOutput out;
-
-    protected byte[] uncompressed;
-    protected int uncompressedLength;
-    private int position = 0;
-
-    private boolean closed;
-
-    public CompressedStreamOutput(StreamOutput out) throws IOException {
-        this.out = out;
-        super.setVersion(out.getVersion());
-        writeHeader(out);
-    }
-
-    @Override
-    public StreamOutput setVersion(Version version) {
-        out.setVersion(version);
-        return super.setVersion(version);
-    }
-
-    @Override
-    public void write(int b) throws IOException {
-        if (position >= uncompressedLength) {
-            flushBuffer();
-        }
-        uncompressed[position++] = (byte) b;
-    }
-
-    @Override
-    public void writeByte(byte b) throws IOException {
-        if (position >= uncompressedLength) {
-            flushBuffer();
-        }
-        uncompressed[position++] = b;
-    }
-
-    @Override
-    public void writeBytes(byte[] input, int offset, int length) throws IOException {
-        // ES, check if length is 0, and don't write in this case
-        if (length == 0) {
-            return;
-        }
-        final int BUFFER_LEN = uncompressedLength;
-
-        // simple case first: buffering only (for trivially short writes)
-        int free = BUFFER_LEN - position;
-        if (free >= length) {
-            System.arraycopy(input, offset, uncompressed, position, length);
-            position += length;
-            return;
-        }
-        // fill partial input as much as possible and flush
-        if (position > 0) {
-            System.arraycopy(input, offset, uncompressed, position, free);
-            position += free;
-            flushBuffer();
-            offset += free;
-            length -= free;
-        }
-
-        // then write intermediate full block, if any, without copying:
-        while (length >= BUFFER_LEN) {
-            compress(input, offset, BUFFER_LEN, out);
-            offset += BUFFER_LEN;
-            length -= BUFFER_LEN;
-        }
-
-        // and finally, copy leftovers in input, if any
-        if (length > 0) {
-            System.arraycopy(input, offset, uncompressed, 0, length);
-        }
-        position = length;
-    }
-
-    @Override
-    public void flush() throws IOException {
-        flushBuffer();
-        out.flush();
-    }
-
-    @Override
-    public void close() throws IOException {
-        if (!closed) {
-            flushBuffer();
-            closed = true;
-            doClose();
-            out.close();
-        }
-    }
-
-    protected abstract void doClose() throws IOException;
-
-    @Override
-    public void reset() throws IOException {
-        position = 0;
-        out.reset();
-    }
-
-    private void flushBuffer() throws IOException {
-        if (position > 0) {
-            compress(uncompressed, 0, position, out);
-            position = 0;
-        }
-    }
-
-    protected abstract void writeHeader(StreamOutput out) throws IOException;
-
-    /**
-     * Compresses the data into the output
-     */
-    protected abstract void compress(byte[] data, int offset, int len, StreamOutput out) throws IOException;
-
-}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java
deleted file mode 100644
index a18a9e3fb65..00000000000
--- a/core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import com.ning.compress.lzf.ChunkDecoder;
-import com.ning.compress.lzf.ChunkEncoder;
-import com.ning.compress.lzf.LZFChunk;
-import com.ning.compress.lzf.util.ChunkDecoderFactory;
-import com.ning.compress.lzf.util.ChunkEncoderFactory;
-import org.elasticsearch.test.ESTestCase;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-
-/**
- * Test an extremely rare corruption produced by the pure java impl of ChunkEncoder.
- */
-public class CorruptedCompressorTests extends ESTestCase {
-
-    public void testCorruption() throws IOException {
-        // this test generates a hash collision: [0,1,153,64] hashes the same as [1,153,64,64]
-        // and then leverages the bug s/inPos/0/ to corrupt the array
-        // the first array is used to insert a reference from this hash to offset 6
-        // and then the hash table is reused and still thinks that there is such a hash at position 6
-        // and at position 7, it finds a sequence with the same hash
-        // so it inserts a buggy reference
-        byte[] b1 = new byte[] {0,1,2,3,4,(byte)153,64,64,64,9,9,9,9,9,9,9,9,9,9};
-        byte[] b2 = new byte[] {1,(byte)153,0,0,0,0,(byte)153,64,64,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-        ChunkEncoder encoder = ChunkEncoderFactory.safeInstance();
-        ChunkDecoder decoder = ChunkDecoderFactory.safeInstance();
-        check(encoder, decoder, b1, 0, b1.length);
-        final int off = 6;
-        check(encoder, decoder, b2, off, b2.length - off);
-    }
-
-    private void check(ChunkEncoder encoder, ChunkDecoder decoder, byte[] bytes, int offset, int length) throws IOException {
-        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-        byte[] expected = new byte[length];
-        byte[] buffer = new byte[LZFChunk.MAX_CHUNK_LEN];
-        byte[] output = new byte[length];
-        System.arraycopy(bytes, offset, expected, 0, length);
-        encoder.encodeAndWriteChunk(bytes, offset, length, outputStream);
-        System.out.println(Arrays.toString(Arrays.copyOf(outputStream.toByteArray(), 20)));
-        InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
-        assertEquals(decoder.decodeChunk(inputStream, buffer, output), length);
-
-        System.out.println(Arrays.toString(Arrays.copyOf(output, 20)));
-        assertArrayEquals(expected, output);
-    }
-}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
deleted file mode 100644
index 3aa2a5de806..00000000000
--- a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import com.ning.compress.BufferRecycler;
-import com.ning.compress.lzf.ChunkEncoder;
-import com.ning.compress.lzf.LZFChunk;
-import com.ning.compress.lzf.util.ChunkEncoderFactory;
-
-import org.elasticsearch.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-
-public class LZFCompressedStreamOutput extends CompressedStreamOutput {
-
-    private final BufferRecycler recycler;
-    private final ChunkEncoder encoder;
-
-    public LZFCompressedStreamOutput(StreamOutput out) throws IOException {
-        super(out);
-        this.recycler = BufferRecycler.instance();
-        this.uncompressed = this.recycler.allocOutputBuffer(LZFChunk.MAX_CHUNK_LEN);
-        this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
-        this.encoder = ChunkEncoderFactory.safeInstance(recycler);
-    }
-
-    @Override
-    public void writeHeader(StreamOutput out) throws IOException {
-        // nothing to do here, each chunk has a header of its own
-    }
-
-    @Override
-    protected void compress(byte[] data, int offset, int len, StreamOutput out) throws IOException {
-        encoder.encodeAndWriteChunk(data, offset, len, out);
-    }
-
-    @Override
-    protected void doClose() throws IOException {
-        byte[] buf = uncompressed;
-        if (buf != null) {
-            uncompressed = null;
-            recycler.releaseOutputBuffer(buf);
-        }
-        encoder.close();
-    }
-}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java
deleted file mode 100644
index 89ee148f4a8..00000000000
--- a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import org.elasticsearch.common.compress.AbstractCompressedStreamTestCase;
-
-public class LZFCompressedStreamTests extends AbstractCompressedStreamTestCase {
-
-    public LZFCompressedStreamTests() {
-        super(new LZFTestCompressor());
-    }
-
-}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java
deleted file mode 100644
index 8f21b0cbf0b..00000000000
--- a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import org.elasticsearch.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-
-// LZF compressor with write support, for testing only
-public class LZFTestCompressor extends LZFCompressor {
-
-    @Override
-    public StreamOutput streamOutput(StreamOutput out) throws IOException {
-        return new LZFCompressedStreamOutput(out);
-    }
-
-}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java
deleted file mode 100644
index 05135f0ed68..00000000000
--- a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.compress.lzf;
-
-import org.elasticsearch.common.compress.AbstractCompressedXContentTestCase;
-
-public class LZFXContentTests extends AbstractCompressedXContentTestCase {
-
-    public LZFXContentTests() {
-        super(new LZFTestCompressor());
-    }
-
-}
diff --git a/core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java b/core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
deleted file mode 100644
index d98574bc379..00000000000
--- a/core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.compress;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.action.get.GetResponse;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.compress.Compressor;
-import org.elasticsearch.common.compress.CompressorFactory;
-import org.elasticsearch.common.compress.lzf.LZFTestCompressor;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.test.ESSingleNodeTestCase;
-
-import java.io.IOException;
-
-import static org.hamcrest.Matchers.equalTo;
-
-public class SearchSourceCompressTests extends ESSingleNodeTestCase {
-    public void testSourceCompressionLZF() throws IOException {
-        final Compressor defaultCompressor = CompressorFactory.defaultCompressor();
-        try {
-            CompressorFactory.setDefaultCompressor(new LZFTestCompressor());
-            verifySource(true);
-            verifySource(false);
-            verifySource(null);
-        } finally {
-            CompressorFactory.setDefaultCompressor(defaultCompressor);
-        }
-    }
-
-    private void verifySource(Boolean compress) throws IOException {
-        try {
-            client().admin().indices().prepareDelete("test").execute().actionGet();
-        } catch (Exception e) {
-            // ignore
-        }
-        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
-        createIndex("test", settings);
-        client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
-
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
-                .startObject("_source").field("compress", compress).endObject()
-                .endObject().endObject().string();
-
-        client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
-
-        for (int i = 1; i < 100; i++) {
-            client().prepareIndex("test", "type1", Integer.toString(i)).setSource(buildSource(i)).execute().actionGet();
-        }
-        client().prepareIndex("test", "type1", Integer.toString(10000)).setSource(buildSource(10000)).execute().actionGet();
-
-        client().admin().indices().prepareRefresh().execute().actionGet();
-
-        for (int i = 1; i < 100; i++) {
-            GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
-            assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(i).bytes().toBytes()));
-        }
-        GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(10000)).execute().actionGet();
-        assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(10000).bytes().toBytes()));
-
-        for (int i = 1; i < 100; i++) {
-            SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.idsQuery("type1").addIds(Integer.toString(i))).execute().actionGet();
-            assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
-            assertThat(searchResponse.getHits().getAt(0).source(), equalTo(buildSource(i).bytes().toBytes()));
-        }
-    }
-
-    private XContentBuilder buildSource(int count) throws IOException {
-        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
-        StringBuilder sb = new StringBuilder();
-        for (int j = 0; j < count; j++) {
-            sb.append("value").append(j).append(' ');
-        }
-        builder.field("field", sb.toString());
-        return builder.endObject();
-    }
-}
diff --git a/distribution/licenses/compress-lzf-1.0.2.jar.sha1 b/distribution/licenses/compress-lzf-1.0.2.jar.sha1
deleted file mode 100644
index 14e4a7f1476..00000000000
--- a/distribution/licenses/compress-lzf-1.0.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-62896e6fca184c79cc01a14d143f3ae2b4f4b4ae
diff --git a/distribution/licenses/compress-lzf-LICENSE b/distribution/licenses/compress-lzf-LICENSE
deleted file mode 100644
index 8d6813f770f..00000000000
--- a/distribution/licenses/compress-lzf-LICENSE
+++ /dev/null
@@ -1,8 +0,0 @@
-This copy of Compress-LZF library is licensed under the
-Apache (Software) License, version 2.0 ("the License").
-See the License for details about distribution rights, and the
-specific rights regarding derivate works.
-
-You may obtain a copy of the License at:
-
-http://www.apache.org/licenses/LICENSE-2.0
diff --git a/distribution/licenses/compress-lzf-NOTICE b/distribution/licenses/compress-lzf-NOTICE
deleted file mode 100644
index 382a800c80d..00000000000
--- a/distribution/licenses/compress-lzf-NOTICE
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compress LZF
-
-This library contains efficient implementation of LZF compression format,
-as well as additional helper classes that build on JDK-provided gzip (deflat)
-codec.
-
-## Licensing
-
-Library is licensed under Apache License 2.0, as per accompanying LICENSE file.
-
-## Credit
-
-Library has been written by Tatu Saloranta (tatu.saloranta@iki.fi).
-It was started at Ning, inc., as an official Open Source process used by
-platform backend, but after initial versions has been developed outside of
-Ning by supporting community.
-
-Other contributors include:
-
-* Jon Hartlaub (first versions of streaming reader/writer; unit tests)
-* Cedrik Lime: parallel LZF implementation
-
-Various community members have contributed bug reports, and suggested minor
-fixes; these can be found from file "VERSION.txt" in SCM.
diff --git a/distribution/licenses/snakeyaml-1.15.jar.sha1 b/distribution/licenses/snakeyaml-1.15.jar.sha1
deleted file mode 100644
index 62d6943ca03..00000000000
--- a/distribution/licenses/snakeyaml-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3b132bea69e8ee099f416044970997bde80f4ea6
diff --git a/distribution/licenses/snakeyaml-LICENSE.txt b/distribution/licenses/snakeyaml-LICENSE.txt
deleted file mode 100644
index d9a10c0d8e8..00000000000
--- a/distribution/licenses/snakeyaml-LICENSE.txt
+++ /dev/null
@@ -1,176 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
diff --git a/distribution/licenses/snakeyaml-NOTICE.txt b/distribution/licenses/snakeyaml-NOTICE.txt
deleted file mode 100644
index 8d1c8b69c3f..00000000000
--- a/distribution/licenses/snakeyaml-NOTICE.txt
+++ /dev/null
@@ -1 +0,0 @@
- 

From ef7258413f796c55196ad80d0c51a56fb8a365da Mon Sep 17 00:00:00 2001
From: Robert Muir <rmuir@apache.org>
Date: Tue, 1 Dec 2015 23:03:54 -0500
Subject: [PATCH 05/41] improve exception and remove complex test; such indexes
 will not be loaded into a closed state.

we will not load them at all.
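
For reference, the legacy LZF format can be recognized from its first three
bytes ('Z', 'V', then a chunk-type byte of 0 or 1). A minimal standalone
sketch of that detection, with an illustrative method name:

    // Hedged sketch: true if the bytes carry the legacy LZF chunk header
    // ('Z', 'V', 0|1), i.e. data written before elasticsearch 2.0.
    static boolean looksLikeLegacyLzf(byte[] bytes) {
        return bytes.length >= 3
                && bytes[0] == 'Z'
                && bytes[1] == 'V'
                && (bytes[2] == 0 || bytes[2] == 1);
    }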
---
 .../common/compress/CompressorFactory.java    | 11 +++++
 .../OldIndexBackwardsCompatibilityIT.java     | 40 -------------------
 2 files changed, 11 insertions(+), 40 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
index 8598634ac4c..e6c43a524ca 100644
--- a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
+++ b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java
@@ -80,12 +80,23 @@ public class CompressorFactory {
 
         XContentType contentType = XContentFactory.xContentType(bytes);
         if (contentType == null) {
+            if (isAncient(bytes)) {
+                throw new IllegalStateException("unsupported compression: index was created before v2.0.0.beta1 and wasn't upgraded?");
+            }
             throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes");
         }
 
         return null;
     }
 
+    /** true if the bytes were compressed with LZF: only used before elasticsearch 2.0 */
+    private static boolean isAncient(BytesReference bytes) {
+        return bytes.length() >= 3 &&
+               bytes.get(0) == 'Z' &&
+               bytes.get(1) == 'V' &&
+               (bytes.get(2) == 0 || bytes.get(2) == 1);
+    }
+
     public static Compressor compressor(ChannelBuffer buffer) {
         for (Compressor compressor : compressors) {
             if (compressor.isCompressed(buffer)) {
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
index b72c0998898..137c6c5b2c2 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java
@@ -280,46 +280,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
         }
     }
 
-    public void testHandlingOfUnsupportedDanglingIndexes() throws Exception {
-        setupCluster();
-        Collections.shuffle(unsupportedIndexes, getRandom());
-        for (String index : unsupportedIndexes) {
-            assertUnsupportedIndexHandling(index);
-        }
-    }
-
-    /**
-     * Waits for the index to show up in the cluster state in closed state
-     */
-    void ensureClosed(final String index) throws InterruptedException {
-        assertTrue(awaitBusy(() -> {
-                            ClusterState state = client().admin().cluster().prepareState().get().getState();
-                            return state.metaData().hasIndex(index) && state.metaData().index(index).getState() == IndexMetaData.State.CLOSE;
-                        }
-                )
-        );
-    }
-
-    /**
-     * Checks that the given index cannot be opened due to incompatible version
-     */
-    void assertUnsupportedIndexHandling(String index) throws Exception {
-        long startTime = System.currentTimeMillis();
-        logger.info("--> Testing old index " + index);
-        String indexName = loadIndex(index);
-        // force reloading dangling indices with a cluster state republish
-        client().admin().cluster().prepareReroute().get();
-        ensureClosed(indexName);
-        try {
-            client().admin().indices().prepareOpen(indexName).get();
-            fail("Shouldn't be able to open an old index");
-        } catch (IllegalStateException ex) {
-            assertThat(ex.getMessage(), containsString("was created before v2.0.0.beta1 and wasn't upgraded"));
-        }
-        unloadIndex(indexName);
-        logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
-    }
-
     void assertOldIndexWorks(String index) throws Exception {
         Version version = extractVersion(index);
         String indexName = loadIndex(index);

From ec7458668005af8e262256b2016e39cf0333d87a Mon Sep 17 00:00:00 2001
From: Martijn van Groningen <martijn.v.groningen@gmail.com>
Date: Thu, 19 Nov 2015 07:26:33 +0100
Subject: [PATCH 06/41] metadata: If an alias and index share the same name
 then a descriptive error should be thrown

In 1.x it is possible via index templates to create an index with an alias that has the same name as the index itself: the index name must match an index template that defines an alias with the same name as the index being created.
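
The new validation enforces a single namespace for index and alias names. A
minimal sketch of the rule, assuming plain name sets in place of the real
AliasOrIndex lookup:

    // Hedged sketch: reject any alias whose name collides with an index name
    static void assertUniqueNames(java.util.Set<String> indexNames,
                                  java.util.Set<String> aliasNames) {
        for (String alias : aliasNames) {
            if (indexNames.contains(alias)) {
                throw new IllegalStateException("index and alias names need to be unique, but alias ["
                        + alias + "] and index [" + alias + "] have the same name");
            }
        }
    }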
---
 .../cluster/metadata/MetaData.java            | 11 ++++-
 .../cluster/metadata/MetaDataTests.java       | 44 +++++++++++++++++++
 2 files changed, 53 insertions(+), 2 deletions(-)
 create mode 100644 core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 9945de1f8d3..d4e7baac790 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -41,6 +41,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.support.LoggerMessageFormat;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
@@ -1029,12 +1030,18 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
 
                 for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
                     AliasMetaData aliasMetaData = aliasCursor.value;
-                    AliasOrIndex.Alias aliasOrIndex = (AliasOrIndex.Alias) aliasAndIndexLookup.get(aliasMetaData.getAlias());
+                    AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
                     if (aliasOrIndex == null) {
                         aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
                         aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
+                    } else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
+                        AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
+                        alias.addIndex(indexMetaData);
+                    } else if (aliasOrIndex instanceof AliasOrIndex.Index) {
+                        AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
+                        throw new IllegalStateException("index and alias names need to be unique, but alias [" + aliasMetaData.getAlias() + "] and index [" + index.getIndex().getIndex() + "] have the same name");
                     } else {
-                        aliasOrIndex.addIndex(indexMetaData);
+                        throw new IllegalStateException("unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
                     }
                 }
             }
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
new file mode 100644
index 00000000000..59116859322
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MetaDataTests extends ESTestCase {
+
+    public void testIndexAndAliasWithSameName() {
+        IndexMetaData.Builder builder = IndexMetaData.builder("index")
+                .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+                .numberOfShards(1)
+                .numberOfReplicas(0)
+                .putAlias(AliasMetaData.builder("index").build());
+        try {
+            MetaData.builder().put(builder).build();
+            fail("expection should have been thrown");
+        } catch (IllegalStateException e) {
+            assertThat(e.getMessage(), equalTo("index and alias names need to be unique, but alias [index] and index [index] have the same name"));
+        }
+    }
+
+}

From 79188ed38de72c3d2d1544bd837811b1b8b8bf69 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Tue, 1 Dec 2015 11:34:32 +0100
Subject: [PATCH 07/41] Treat mappings as an index-level feature.

Today we try to have type-level granularity when dealing with mappings. This
does not play well with the cross-type validations that we are adding. For
instance, we prevent the `_parent` field from pointing to an existing type. This
validation would be skipped today in the case of dedicated master nodes, since
those master nodes would only create the type that is being updated when
updating a mapping.
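
With index-level granularity, the refresh decision becomes: if any type's
mapping on the node differs from the cluster state, re-send the mappings for
all types. A rough sketch of that rule, with MapperLike as an illustrative
stand-in for DocumentMapper:

    interface MapperLike { String type(); String source(); }

    // Hedged sketch: one stale type is enough to re-publish every mapping
    static boolean needsFullResync(java.util.List<MapperLike> mappers,
                                   java.util.Map<String, String> clusterStateSources) {
        for (MapperLike mapper : mappers) {
            if (!mapper.source().equals(clusterStateSources.get(mapper.type()))) {
                return true;
            }
        }
        return false;
    }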
---
 .../index/NodeMappingRefreshAction.java       | 16 +---
 .../metadata/MetaDataMappingService.java      | 95 ++++++++-----------
 .../cluster/IndicesClusterStateService.java   | 73 +++-----------
 3 files changed, 52 insertions(+), 132 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java
index d0eb29d6b22..f8507e5b689 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java
@@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaDataMappingService;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -57,7 +56,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
     public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
         final DiscoveryNodes nodes = state.nodes();
         if (nodes.masterNode() == null) {
-            logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types()));
+            logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
             return;
         }
         transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
@@ -67,7 +66,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
 
         @Override
         public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
-            metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types());
+            metaDataMappingService.refreshMapping(request.index(), request.indexUUID());
             channel.sendResponse(TransportResponse.Empty.INSTANCE);
         }
     }
@@ -76,16 +75,14 @@ public class NodeMappingRefreshAction extends AbstractComponent {
 
         private String index;
         private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
-        private String[] types;
         private String nodeId;
 
         public NodeMappingRefreshRequest() {
         }
 
-        public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) {
+        public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) {
             this.index = index;
             this.indexUUID = indexUUID;
-            this.types = types;
             this.nodeId = nodeId;
         }
 
@@ -107,11 +104,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
             return indexUUID;
         }
 
-
-        public String[] types() {
-            return types;
-        }
-
         public String nodeId() {
             return nodeId;
         }
@@ -120,7 +112,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeString(index);
-            out.writeStringArray(types);
             out.writeString(nodeId);
             out.writeString(indexUUID);
         }
@@ -129,7 +120,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             index = in.readString();
-            types = in.readStringArray();
             nodeId = in.readString();
             indexUUID = in.readString();
         }
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index c2725359140..44de399bed4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -69,12 +69,10 @@ public class MetaDataMappingService extends AbstractComponent {
     static class RefreshTask {
         final String index;
         final String indexUUID;
-        final String[] types;
 
-        RefreshTask(String index, final String indexUUID, String[] types) {
+        RefreshTask(String index, final String indexUUID) {
             this.index = index;
             this.indexUUID = indexUUID;
-            this.types = types;
         }
     }
 
@@ -120,13 +118,16 @@ public class MetaDataMappingService extends AbstractComponent {
             // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
             // the latest (based on order) update mapping one per node
             List<RefreshTask> allIndexTasks = entry.getValue();
-            List<RefreshTask> tasks = new ArrayList<>();
+            boolean hasTaskWithRightUUID = false;
             for (RefreshTask task : allIndexTasks) {
-                if (!indexMetaData.isSameUUID(task.indexUUID)) {
+                if (indexMetaData.isSameUUID(task.indexUUID)) {
+                    hasTaskWithRightUUID = true;
+                } else {
                     logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
-                    continue;
                 }
-                tasks.add(task);
+            }
+            if (hasTaskWithRightUUID == false) {
+                continue;
             }
 
             // construct the actual index if needed, and make sure the relevant mappings are there
@@ -134,24 +135,17 @@ public class MetaDataMappingService extends AbstractComponent {
             IndexService indexService = indicesService.indexService(index);
             if (indexService == null) {
                 // we need to create the index here, and add the current mapping to it, so we can merge
-                indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
+                indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
                 removeIndex = true;
-                Set<String> typesToIntroduce = new HashSet<>();
-                for (RefreshTask task : tasks) {
-                    Collections.addAll(typesToIntroduce, task.types);
-                }
-                for (String type : typesToIntroduce) {
-                    // only add the current relevant mapping (if exists)
-                    if (indexMetaData.getMappings().containsKey(type)) {
-                        // don't apply the default mapping, it has been applied when the mapping was created
-                        indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true);
-                    }
+                for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
+                    // don't apply the default mapping, it has been applied when the mapping was created
+                    indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
                 }
             }
 
             IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
             try {
-                boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder);
+                boolean indexDirty = refreshIndexMapping(indexService, builder);
                 if (indexDirty) {
                     mdBuilder.put(builder);
                     dirty = true;
@@ -169,38 +163,28 @@ public class MetaDataMappingService extends AbstractComponent {
         return ClusterState.builder(currentState).metaData(mdBuilder).build();
     }
 
-    private boolean processIndexMappingTasks(List<RefreshTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
+    private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Builder builder) {
         boolean dirty = false;
         String index = indexService.index().name();
-        // keep track of what we already refreshed, no need to refresh it again...
-        Set<String> processedRefreshes = new HashSet<>();
-        for (RefreshTask refreshTask : tasks) {
-            try {
-                List<String> updatedTypes = new ArrayList<>();
-                for (String type : refreshTask.types) {
-                    if (processedRefreshes.contains(type)) {
-                        continue;
-                    }
-                    DocumentMapper mapper = indexService.mapperService().documentMapper(type);
-                    if (mapper == null) {
-                        continue;
-                    }
-                    if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
-                        updatedTypes.add(type);
-                        builder.putMapping(new MappingMetaData(mapper));
-                    }
-                    processedRefreshes.add(type);
+        try {
+            List<String> updatedTypes = new ArrayList<>();
+            for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
+                final String type = mapper.type();
+                if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
+                    updatedTypes.add(type);
                 }
-
-                if (updatedTypes.isEmpty()) {
-                    continue;
-                }
-
-                logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
-                dirty = true;
-            } catch (Throwable t) {
-                logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
             }
+
+            // if a single type is not up-to-date, re-send everything
+            if (updatedTypes.isEmpty() == false) {
+                logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
+                dirty = true;
+                for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
+                    builder.putMapping(new MappingMetaData(mapper));
+                }
+            }
+        } catch (Throwable t) {
+            logger.warn("[{}] failed to refresh-mapping in cluster state", t, index);
         }
         return dirty;
     }
@@ -208,9 +192,9 @@ public class MetaDataMappingService extends AbstractComponent {
     /**
      * Refreshes mappings if they are not the same between original and parsed version
      */
-    public void refreshMapping(final String index, final String indexUUID, final String... types) {
-        final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types);
-        clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]",
+    public void refreshMapping(final String index, final String indexUUID) {
+        final RefreshTask refreshTask = new RefreshTask(index, indexUUID);
+        clusterService.submitStateUpdateTask("refresh-mapping [" + index + "]",
             refreshTask,
             ClusterStateTaskConfig.build(Priority.HIGH),
             refreshExecutor,
@@ -236,18 +220,13 @@ public class MetaDataMappingService extends AbstractComponent {
                                 if (indicesService.hasIndex(index) == false) {
                                     indicesToClose.add(index);
                                     indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
-                                    // make sure to add custom default mapping if exists
-                                    if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
-                                        indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
+                                    // add mappings for all types, we need them for cross-type validation
+                                    for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
+                                        indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
                                     }
                                 } else {
                                     indexService = indicesService.indexService(index);
                                 }
-                                // only add the current relevant mapping (if exists and not yet added)
-                                if (indexMetaData.getMappings().containsKey(request.type()) &&
-                                        !indexService.mapperService().hasMapping(request.type())) {
-                                    indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
-                                }
                             }
                         }
                     }
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index cd60b87765a..fc793a5dfda 100644
--- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -349,7 +349,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                 // we only create / update here
                 continue;
             }
-            List<String> typesToRefresh = new ArrayList<>();
+            boolean requireRefresh = false;
             String index = indexMetaData.getIndex();
             IndexService indexService = indicesService.indexService(index);
             if (indexService == null) {
@@ -358,31 +358,17 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
             }
             try {
                 MapperService mapperService = indexService.mapperService();
-                // first, go over and update the _default_ mapping (if exists)
-                if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
-                    boolean requireRefresh = processMapping(index, mapperService, MapperService.DEFAULT_MAPPING, indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
-                    if (requireRefresh) {
-                        typesToRefresh.add(MapperService.DEFAULT_MAPPING);
-                    }
-                }
-
                 // go over and add the relevant mappings (or update them)
                 for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                     MappingMetaData mappingMd = cursor.value;
                     String mappingType = mappingMd.type();
                     CompressedXContent mappingSource = mappingMd.source();
-                    if (mappingType.equals(MapperService.DEFAULT_MAPPING)) { // we processed _default_ first
-                        continue;
-                    }
-                    boolean requireRefresh = processMapping(index, mapperService, mappingType, mappingSource);
-                    if (requireRefresh) {
-                        typesToRefresh.add(mappingType);
-                    }
+                    requireRefresh |= processMapping(index, mapperService, mappingType, mappingSource);
                 }
-                if (!typesToRefresh.isEmpty() && sendRefreshMapping) {
+                if (requireRefresh && sendRefreshMapping) {
                     nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
                             new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.getIndexUUID(),
-                                    typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId())
+                                    event.state().nodes().localNodeId())
                     );
                 }
             } catch (Throwable t) {
@@ -398,26 +384,21 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
     }
 
     private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedXContent mappingSource) throws Throwable {
-        if (!seenMappings.containsKey(new Tuple<>(index, mappingType))) {
-            seenMappings.put(new Tuple<>(index, mappingType), true);
-        }
-
-        // refresh mapping can happen for 2 reasons. The first is less urgent, and happens when the mapping on this
-        // node is ahead of what there is in the cluster state (yet an update-mapping has been sent to it already,
-        // it just hasn't been processed yet and published). Eventually, the mappings will converge, and the refresh
-        // mapping sent is more of a safe keeping (assuming the update mapping failed to reach the master, ...)
-        // the second case is where the parsing/merging of the mapping from the metadata doesn't result in the same
+        // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
         // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
         // merge version of it, which it does when refreshing the mappings), and warn log it.
         boolean requiresRefresh = false;
         try {
-            if (!mapperService.hasMapping(mappingType)) {
+            DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
+
+            if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) {
+                String op = existingMapper == null ? "adding" : "updating";
                 if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
-                    logger.debug("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
+                    logger.debug("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string());
                 } else if (logger.isTraceEnabled()) {
-                    logger.trace("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
+                    logger.trace("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string());
                 } else {
-                    logger.debug("[{}] adding mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, mappingType);
+                    logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, op, mappingType);
                 }
                 // we don't apply default, since it has been applied when the mappings were parsed initially
                 mapperService.merge(mappingType, mappingSource, false, true);
@@ -425,24 +406,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                     logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
                     requiresRefresh = true;
                 }
-            } else {
-                DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
-                if (!mappingSource.equals(existingMapper.mappingSource())) {
-                    // mapping changed, update it
-                    if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
-                        logger.debug("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
-                    } else if (logger.isTraceEnabled()) {
-                        logger.trace("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
-                    } else {
-                        logger.debug("[{}] updating mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, mappingType);
-                    }
-                    // we don't apply default, since it has been applied when the mappings were parsed initially
-                    mapperService.merge(mappingType, mappingSource, false, true);
-                    if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
-                        requiresRefresh = true;
-                        logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
-                    }
-                }
             }
         } catch (Throwable e) {
             logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource);
@@ -781,27 +744,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         } catch (Throwable e) {
             logger.warn("failed to clean index ({})", e, reason);
         }
-        clearSeenMappings(index);
 
     }
 
-    private void clearSeenMappings(String index) {
-        // clear seen mappings as well
-        for (Tuple<String, String> tuple : seenMappings.keySet()) {
-            if (tuple.v1().equals(index)) {
-                seenMappings.remove(tuple);
-            }
-        }
-    }
-
     private void deleteIndex(String index, String reason) {
         try {
             indicesService.deleteIndex(index, reason);
         } catch (Throwable e) {
             logger.warn("failed to delete index ({})", e, reason);
         }
-        // clear seen mappings as well
-        clearSeenMappings(index);
 
     }
 

From 05430a788a383ee228c9140c0fd9c1268bb4557c Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 2 Dec 2015 10:34:18 -0500
Subject: [PATCH 08/41] Remove and forbid use of the type-unsafe empty
 Collections fields

This commit removes and now forbids all uses of the type-unsafe empty
Collections fields Collections#EMPTY_LIST, Collections#EMPTY_MAP, and
Collections#EMPTY_SET. The type-safe methods Collections#emptyList,
Collections#emptyMap, and Collections#emptySet should be used instead.
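
The difference is visible at the call site: the raw fields force an unchecked
assignment, while the generic factory methods infer the element type. A small
example (inside any method, with java.util imports assumed):

    // Collections.EMPTY_LIST is a raw java.util.List, so this assignment
    // compiles only with an unchecked warning:
    List<String> a = Collections.EMPTY_LIST;
    // Collections.emptyList() infers List<String> and is type-safe:
    List<String> b = Collections.emptyList();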
---
 .../src/main/resources/forbidden/all-signatures.txt   |  4 ++++
 .../cluster/metadata/MetaDataCreateIndexService.java  |  2 +-
 .../cluster/metadata/MetaDataIndexAliasesService.java |  2 +-
 .../cluster/metadata/MetaDataIndexUpgradeService.java |  6 +++---
 .../cluster/metadata/MetaDataMappingService.java      |  2 +-
 .../org/elasticsearch/common/util/ExtensionPoint.java |  2 +-
 .../TransportNodesListGatewayStartedShards.java       |  2 +-
 .../index/analysis/AnalysisRegistry.java              |  2 +-
 .../org/elasticsearch/index/translog/Translog.java    |  4 ++--
 .../org/elasticsearch/indices/IndicesService.java     |  4 ++--
 .../indices/cluster/IndicesClusterStateService.java   |  2 +-
 .../store/TransportNodesListShardStoreMetaData.java   |  2 +-
 .../elasticsearch/percolator/PercolateContext.java    |  2 +-
 .../repositories/blobstore/BlobStoreRepository.java   |  2 +-
 .../main/java/org/elasticsearch/script/Template.java  |  4 ++--
 .../heuristics/SignificanceHeuristicStreams.java      |  2 +-
 .../BucketMetricsPipelineAggregator.java              |  4 ++--
 .../max/MaxBucketPipelineAggregator.java              |  2 +-
 .../min/MinBucketPipelineAggregator.java              |  2 +-
 .../pipeline/movavg/models/MovAvgModelStreams.java    |  2 +-
 .../org/elasticsearch/search/suggest/Suggesters.java  |  2 +-
 .../completion/CompletionSuggestionContext.java       |  4 ++--
 .../java/org/elasticsearch/snapshots/Snapshot.java    |  4 +---
 .../elasticsearch/transport/local/LocalTransport.java |  2 +-
 .../client/transport/FailAndRetryMockTransport.java   |  2 +-
 .../transport/TransportClientNodesServiceTests.java   |  2 +-
 .../org/elasticsearch/index/IndexModuleTests.java     |  6 +++---
 .../org/elasticsearch/index/IndexSettingsTests.java   |  4 ++--
 .../index/analysis/AnalysisModuleTests.java           |  2 +-
 .../index/analysis/AnalysisTestsHelper.java           |  2 +-
 .../index/analysis/CompoundAnalysisTests.java         |  4 ++--
 .../org/elasticsearch/index/codec/CodecTests.java     |  2 +-
 .../index/engine/InternalEngineTests.java             |  6 +++---
 .../index/query/AbstractQueryTestCase.java            |  2 +-
 .../index/query/TemplateQueryParserTests.java         |  2 +-
 .../significant/SignificanceHeuristicTests.java       |  4 ++--
 .../cloud/gce/GceComputeServiceImpl.java              | 11 ++++++-----
 .../org/elasticsearch/test/IndexSettingsModule.java   |  2 +-
 .../org/elasticsearch/test/TestSearchContext.java     |  2 +-
 .../test/store/MockFSDirectoryService.java            |  2 +-
 .../test/transport/CapturingTransport.java            |  2 +-
 41 files changed, 63 insertions(+), 60 deletions(-)

diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/all-signatures.txt
index 0809cdab9c2..5a91807233b 100644
--- a/buildSrc/src/main/resources/forbidden/all-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/all-signatures.txt
@@ -112,3 +112,7 @@ java.lang.System#setProperty(java.lang.String,java.lang.String)
 java.lang.System#clearProperty(java.lang.String)
 java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view
 
+@defaultMessage Avoid unchecked warnings by using Collections#empty(List|Map|Set) methods
+java.util.Collections#EMPTY_LIST
+java.util.Collections#EMPTY_MAP
+java.util.Collections#EMPTY_SET
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
index d3ba811a6e5..96d378af042 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -299,7 +299,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     // Set up everything, now locally create the index to see that things are ok, and apply
                     final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
                     // create the index here (on the master) to validate it can be created, as well as adding the mapping
-                    indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.EMPTY_LIST);
+                    indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
                     indexCreated = true;
                     // now add the mappings
                     IndexService indexService = indicesService.indexServiceSafe(request.index());
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
index b13f9711bef..71ef9c22c33 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java
@@ -99,7 +99,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                                     if (indexService == null) {
                                         // temporarily create the index and add mappings so we can parse the filter
                                         try {
-                                            indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
+                                            indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
                                             if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
                                                 indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
                                             }
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
index 2d89857f60d..00904af8915 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
@@ -218,8 +218,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         try {
             // We cannot instantiate real analysis server at this point because the node might not have
             // been started yet. However, we don't really need real analyzers at this stage - so we can fake it
-            IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST);
-            SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP);
+            IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
+            SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
 
             try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
                 try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
@@ -256,7 +256,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         };
 
         public FakeAnalysisService(IndexSettings indexSettings) {
-            super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
+            super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
         }
 
         @Override
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 44de399bed4..854d55020dd 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -219,7 +219,7 @@ public class MetaDataMappingService extends AbstractComponent {
                                 IndexService indexService;
                                 if (indicesService.hasIndex(index) == false) {
                                     indicesToClose.add(index);
-                                    indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
+                                    indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
                                     // add mappings for all types, we need them for cross-type validation
                                     for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
                                         indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
diff --git a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java b/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java
index 2ce4190a00e..d25113a54bb 100644
--- a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java
+++ b/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java
@@ -123,7 +123,7 @@ public abstract class ExtensionPoint {
     public static final class SelectedType<T> extends ClassMap<T> {
 
         public SelectedType(String name, Class<T> extensionClass) {
-            super(name, extensionClass, Collections.EMPTY_SET);
+            super(name, extensionClass, Collections.emptySet());
         }
 
         /**
diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
index bfc078a6679..d91b4bd8cdd 100644
--- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
+++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
@@ -131,7 +131,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
                 if (metaData != null) {
                     ShardPath shardPath = null;
                     try {
-                        IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
+                        IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
                         shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
                         if (shardPath == null) {
                             throw new IllegalStateException(shardId + " no shard path found");
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
index 4e7b66dec8f..86c06dbe54f 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
@@ -55,7 +55,7 @@ public final class AnalysisRegistry implements Closeable {
     private final Environment environemnt;
 
     public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
-        this(hunspellService, environment, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
+        this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
     }
 
     public AnalysisRegistry(HunspellService hunspellService, Environment environment,
diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java
index e6ff9344acd..35dd895bc2e 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -166,7 +166,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                 current = createWriter(checkpoint.generation + 1);
                 this.lastCommittedTranslogFileGeneration = translogGeneration.translogFileGeneration;
             } else {
-                this.recoveredTranslogs = Collections.EMPTY_LIST;
+                this.recoveredTranslogs = Collections.emptyList();
                 IOUtils.rm(location);
                 logger.debug("wipe translog location - creating new translog");
                 Files.createDirectories(location);
@@ -582,7 +582,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
      * and updated with any future translog.
      */
     public static final class View implements Closeable {
-        public static final Translog.View EMPTY_VIEW = new View(Collections.EMPTY_LIST, null);
+        public static final Translog.View EMPTY_VIEW = new View(Collections.emptyList(), null);
 
         boolean closed;
         // last in this list is always FsTranslog.current
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index ca059fa25f0..ad1cd399c36 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -264,7 +264,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         }
         final String indexName = indexMetaData.getIndex();
         final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(indexName, indexExpression, clusterService.state());
-        final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST, indexNameMatcher);
+        final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList(), indexNameMatcher);
         Index index = new Index(indexMetaData.getIndex());
         if (indices.containsKey(index.name())) {
             throw new IndexAlreadyExistsException(index);
@@ -562,7 +562,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         // play safe here and make sure that we take node level settings into account.
         // we might run on nodes where we use shard FS and then in the future don't delete
         // actual content.
-        return new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
+        return new IndexSettings(metaData, settings, Collections.emptyList());
     }
 
     /**
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index fc793a5dfda..64ff6c74587 100644
--- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -245,7 +245,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
             } else {
                 final IndexMetaData metaData = previousState.metaData().index(index);
                 assert metaData != null;
-                indexSettings = new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
+                indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
                 indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
             }
             try {
diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
index 44c9fbec0e0..d963ea24303 100644
--- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
+++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
@@ -171,7 +171,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
             if (!storeType.contains("fs")) {
                 return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
             }
-            final IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
+            final IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(metaData, settings, Collections.emptyList());
             final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
             if (shardPath == null) {
                 return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java
index 6e164c3afc5..70abaaaff3d 100644
--- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java
+++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java
@@ -729,7 +729,7 @@ public class PercolateContext extends SearchContext {
 
     @Override
     public Set<String> getHeaders() {
-        return Collections.EMPTY_SET;
+        return Collections.emptySet();
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 8c5088e757b..2648a183362 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -294,7 +294,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
         if (readOnly()) {
             throw new RepositoryException(this.repositoryName, "cannot delete snapshot from a readonly repository");
         }
-        List<String> indices = Collections.EMPTY_LIST;
+        List<String> indices = Collections.emptyList();
         Snapshot snapshot = null;
         try {
             snapshot = readSnapshot(snapshotId);
diff --git a/core/src/main/java/org/elasticsearch/script/Template.java b/core/src/main/java/org/elasticsearch/script/Template.java
index babe488d4ad..e488f0a01e5 100644
--- a/core/src/main/java/org/elasticsearch/script/Template.java
+++ b/core/src/main/java/org/elasticsearch/script/Template.java
@@ -121,12 +121,12 @@ public class Template extends Script {
 
     @SuppressWarnings("unchecked")
     public static Script parse(Map<String, Object> config, boolean removeMatchedEntries, ParseFieldMatcher parseFieldMatcher) {
-        return new TemplateParser(Collections.EMPTY_MAP, MustacheScriptEngineService.NAME).parse(config, removeMatchedEntries, parseFieldMatcher);
+        return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(config, removeMatchedEntries, parseFieldMatcher);
     }
 
     @SuppressWarnings("unchecked")
     public static Template parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
-        return new TemplateParser(Collections.EMPTY_MAP, MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher);
+        return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher);
     }
 
     @Deprecated
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicStreams.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicStreams.java
index c58e923280d..64d2ae659e0 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicStreams.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicStreams.java
@@ -31,7 +31,7 @@ import java.util.Map;
  */
 public class SignificanceHeuristicStreams {
 
-    private static Map<String, Stream> STREAMS = Collections.EMPTY_MAP;
+    private static Map<String, Stream> STREAMS = Collections.emptyMap();
 
     static {
         HashMap<String, Stream> map = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java
index 89955ef0278..0f2ffea9a75 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java
@@ -78,7 +78,7 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg
                 }
             }
         }
-        return buildAggregation(Collections.EMPTY_LIST, metaData());
+        return buildAggregation(Collections.emptyList(), metaData());
     }
 
     /**
@@ -123,4 +123,4 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg
         gapPolicy.writeTo(out);
     }
 
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java
index 8101c3caae2..95a70af7934 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/max/MaxBucketPipelineAggregator.java
@@ -90,7 +90,7 @@ public class MaxBucketPipelineAggregator extends BucketMetricsPipelineAggregator
     @Override
     protected InternalAggregation buildAggregation(List<PipelineAggregator> pipelineAggregators, Map<String, Object> metadata) {
         String[] keys = maxBucketKeys.toArray(new String[maxBucketKeys.size()]);
-        return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.EMPTY_LIST, metaData());
+        return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.emptyList(), metaData());
     }
 
     public static class Factory extends PipelineAggregatorFactory {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java
index 5da74281290..755b2060ae6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/min/MinBucketPipelineAggregator.java
@@ -91,7 +91,7 @@ public class MinBucketPipelineAggregator extends BucketMetricsPipelineAggregator
     protected InternalAggregation buildAggregation(java.util.List<PipelineAggregator> pipelineAggregators,
             java.util.Map<String, Object> metadata) {
         String[] keys = minBucketKeys.toArray(new String[minBucketKeys.size()]);
-        return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.EMPTY_LIST, metaData());
+        return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.emptyList(), metaData());
     };
 
     public static class Factory extends PipelineAggregatorFactory {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelStreams.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelStreams.java
index d0314f4c4f6..f1238fb9fbd 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelStreams.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/movavg/models/MovAvgModelStreams.java
@@ -32,7 +32,7 @@ import java.util.Map;
  */
 public class MovAvgModelStreams {
 
-    private static Map<String, Stream> STREAMS = Collections.EMPTY_MAP;
+    private static Map<String, Stream> STREAMS = Collections.emptyMap();
 
     static {
         HashMap<String, Stream> map = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java
index a6f66fdd763..9eba50f478a 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggesters.java
@@ -35,7 +35,7 @@ public final class Suggesters extends ExtensionPoint.ClassMap<Suggester> {
     private final Map<String, Suggester> parsers;
 
     public Suggesters() {
-        this(Collections.EMPTY_MAP);
+        this(Collections.emptyMap());
     }
 
     public Suggesters(Map<String, Suggester> suggesters) {
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java
index eab62a063a7..8ffd497eb3a 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java
@@ -38,10 +38,10 @@ public class CompletionSuggestionContext extends SuggestionSearchContext.Suggest
     private CompletionFieldMapper.CompletionFieldType fieldType;
     private CompletionSuggestionBuilder.FuzzyOptionsBuilder fuzzyOptionsBuilder;
     private CompletionSuggestionBuilder.RegexOptionsBuilder regexOptionsBuilder;
-    private Map<String, List<ContextMapping.QueryContext>> queryContexts = Collections.EMPTY_MAP;
+    private Map<String, List<ContextMapping.QueryContext>> queryContexts = Collections.emptyMap();
     private final MapperService mapperService;
     private final IndexFieldDataService indexFieldDataService;
-    private Set<String> payloadFields = Collections.EMPTY_SET;
+    private Set<String> payloadFields = Collections.emptySet();
 
     CompletionSuggestionContext(Suggester suggester, MapperService mapperService, IndexFieldDataService indexFieldDataService) {
         super(suggester);
diff --git a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java
index d4311cab901..1206ef53501 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import static java.util.Collections.*;
-
 /**
  * Represent information about snapshot
  */
@@ -93,7 +91,7 @@ public class Snapshot implements Comparable<Snapshot>, ToXContent, FromXContentB
      * Special constructor for the prototype object
      */
     private Snapshot() {
-        this("", (List<String>) EMPTY_LIST, 0);
+        this("", Collections.emptyList(), 0);
     }
 
     private static SnapshotState snapshotState(String reason, List<SnapshotShardFailure> shardFailures) {
diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
index 2dca60cf53c..dda3451e950 100644
--- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
@@ -154,7 +154,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
 
     @Override
     public Map<String, BoundTransportAddress> profileBoundAddresses() {
-        return Collections.EMPTY_MAP;
+        return Collections.emptyMap();
     }
 
     @Override
diff --git a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java
index 95456fda901..9f16ade87e8 100644
--- a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java
+++ b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java
@@ -189,6 +189,6 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp
 
     @Override
     public Map<String, BoundTransportAddress> profileBoundAddresses() {
-        return Collections.EMPTY_MAP;
+        return Collections.emptyMap();
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
index 3f5a91960fc..093e46186b3 100644
--- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
@@ -63,7 +63,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
             transport = new FailAndRetryMockTransport<TestResponse>(getRandom()) {
                 @Override
                 public List<String> getLocalAddresses() {
-                    return Collections.EMPTY_LIST;
+                    return Collections.emptyList();
                 }
 
                 @Override
diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java
index 30cb7cc8f99..2b76d03952d 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java
@@ -104,8 +104,8 @@ public class IndexModuleTests extends ESTestCase {
         Set<ScriptEngineService> scriptEngines = new HashSet<>();
         scriptEngines.add(new MustacheScriptEngineService(settings));
         scriptEngines.addAll(Arrays.asList(scriptEngineServices));
-        ScriptService scriptService = new ScriptService(settings, environment, scriptEngines, new ResourceWatcherService(settings, threadPool), new ScriptContextRegistry(Collections.EMPTY_LIST));
-        IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(settings, Collections.EMPTY_SET, new NamedWriteableRegistry());
+        ScriptService scriptService = new ScriptService(settings, environment, scriptEngines, new ResourceWatcherService(settings, threadPool), new ScriptContextRegistry(Collections.emptyList()));
+        IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(settings, Collections.emptySet(), new NamedWriteableRegistry());
         return new NodeServicesProvider(threadPool, indicesQueryCache, null, warmer, bigArrays, client, scriptService, indicesQueriesRegistry, indicesFieldDataCache, circuitBreakerService);
     }
 
@@ -251,7 +251,7 @@ public class IndexModuleTests extends ESTestCase {
             assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage());
         }
     }
-  
+
     public void testSetupWithoutType() throws IOException {
         Settings indexSettings = Settings.settingsBuilder()
                 .put("index.similarity.my_similarity.foo", "bar")
diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java
index 9a1dc3587a9..3f97fe402fa 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java
@@ -95,7 +95,7 @@ public class IndexSettingsTests extends ESTestCase {
     public void testSettingsConsistency() {
         Version version = VersionUtils.getPreviousVersion();
         IndexMetaData metaData = newIndexMeta("index", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build());
-        IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY, Collections.EMPTY_LIST);
+        IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY, Collections.emptyList());
         assertEquals(version, settings.getIndexVersionCreated());
         assertEquals("_na_", settings.getUUID());
         try {
@@ -106,7 +106,7 @@ public class IndexSettingsTests extends ESTestCase {
         }
 
         metaData = newIndexMeta("index", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build());
-        settings = new IndexSettings(metaData, Settings.EMPTY, Collections.EMPTY_LIST);
+        settings = new IndexSettings(metaData, Settings.EMPTY, Collections.emptyList());
         try {
             settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).put("index.test.setting.int", 42).build()));
             fail("uuid missing/change");
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
index ef13a891a29..7cd16e350a4 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
@@ -72,7 +72,7 @@ public class AnalysisModuleTests extends ModuleTestCase {
 
     public AnalysisRegistry getNewRegistry(Settings settings) {
        return new AnalysisRegistry(null, new Environment(settings),
-                Collections.EMPTY_MAP, Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new), Collections.EMPTY_MAP, Collections.EMPTY_MAP);
+                Collections.emptyMap(), Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new), Collections.emptyMap(), Collections.emptyMap());
     }
 
     private Settings loadFromClasspath(String path) {
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
index 7d4164939b0..1404716b0c8 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
@@ -51,6 +51,6 @@ public class AnalysisTestsHelper {
         }
         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, settings);
         Environment environment = new Environment(settings);
-        return new AnalysisRegistry(new HunspellService(settings, environment, Collections.EMPTY_MAP), environment).build(idxSettings);
+        return new AnalysisRegistry(new HunspellService(settings, environment, Collections.emptyMap()), environment).build(idxSettings);
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
index 0eb916826f4..e685c21422b 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
@@ -52,7 +52,7 @@ public class CompoundAnalysisTests extends ESTestCase {
         Settings settings = getJsonSettings();
         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, settings);
         AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings),
-                Collections.EMPTY_MAP,Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new),Collections.EMPTY_MAP,Collections.EMPTY_MAP).build(idxSettings);
+                Collections.emptyMap(),Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new),Collections.emptyMap(),Collections.emptyMap()).build(idxSettings);
 
         TokenFilterFactory filterFactory = analysisService.tokenFilter("dict_dec");
         MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
@@ -71,7 +71,7 @@ public class CompoundAnalysisTests extends ESTestCase {
         Index index = new Index("test");
         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, settings);
         AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings),
-                Collections.EMPTY_MAP, Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new),Collections.EMPTY_MAP,Collections.EMPTY_MAP).build(idxSettings);
+                Collections.emptyMap(), Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new),Collections.emptyMap(),Collections.emptyMap()).build(idxSettings);
 
         Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
 
diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
index e8c351c0b37..eae80418b0d 100644
--- a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
+++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
@@ -108,7 +108,7 @@ public class CodecTests extends ESTestCase {
                 .put("path.home", createTempDir())
                 .build();
         IndexSettings settings = IndexSettingsModule.newIndexSettings(new Index("_na"), nodeSettings);
-        SimilarityService similarityService = new SimilarityService(settings, Collections.EMPTY_MAP);
+        SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap());
         AnalysisService analysisService = new AnalysisRegistry(null, new Environment(nodeSettings)).build(settings);
         MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), Collections.emptyMap());
         MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry);
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 01c47cce05e..2a122c38dde 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -1617,7 +1617,7 @@ public class InternalEngineTests extends ESTestCase {
         // now it should be OK.
         IndexSettings indexSettings = new IndexSettings(defaultSettings.getIndexMetaData(),
                 Settings.builder().put(defaultSettings.getSettings()).put(EngineConfig.INDEX_FORCE_NEW_TRANSLOG, true).build(),
-                Collections.EMPTY_LIST);
+                Collections.emptyList());
         engine = createEngine(indexSettings, store, primaryTranslogDir, new MergeSchedulerConfig(indexSettings), newMergePolicy());
     }
 
@@ -1901,8 +1901,8 @@ public class InternalEngineTests extends ESTestCase {
             RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
             Index index = new Index(indexName);
             IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
-            AnalysisService analysisService = new AnalysisService(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
-            SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP);
+            AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
+            SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
             MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry();
             MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry);
             DocumentMapper.Builder b = new DocumentMapper.Builder(settings, rootBuilder, mapperService);
diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java
index f64dc1a55de..aebf00e0728 100644
--- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java
+++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java
@@ -241,7 +241,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
         ).createInjector();
         AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings)).build(idxSettings);
         ScriptService scriptService = injector.getInstance(ScriptService.class);
-        SimilarityService similarityService = new SimilarityService(idxSettings, Collections.EMPTY_MAP);
+        SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
         MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class);
         MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry);
         indexFieldDataService = new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService);
diff --git a/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTests.java
index 78379260b72..d62a11077ec 100644
--- a/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTests.java
@@ -120,7 +120,7 @@ public class TemplateQueryParserTests extends ESTestCase {
 
         AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings)).build(idxSettings);
         ScriptService scriptService = injector.getInstance(ScriptService.class);
-        SimilarityService similarityService = new SimilarityService(idxSettings, Collections.EMPTY_MAP);
+        SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
         MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry();
         MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry);
         IndexFieldDataService indexFieldDataService =new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
index b10dfd31b35..d20dff0ae05 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
@@ -103,13 +103,13 @@ public class SignificanceHeuristicTests extends ESTestCase {
         if (randomBoolean()) {
             buckets.add(new SignificantLongTerms.Bucket(1, 2, 3, 4, 123, InternalAggregations.EMPTY, null));
             sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets,
-                    Collections.EMPTY_LIST, null);
+                    Collections.emptyList(), null);
             sTerms[1] = new SignificantLongTerms();
         } else {
 
             BytesRef term = new BytesRef("someterm");
             buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY));
-            sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, Collections.EMPTY_LIST,
+            sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, Collections.emptyList(),
                     null);
             sTerms[1] = new SignificantStringTerms();
         }
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java
index d8f6dd331b2..0171a7fe539 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java
@@ -79,14 +79,15 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute
                         return list.execute();
                     }
                 });
-                if (instanceList.isEmpty()) {
-                    return Collections.EMPTY_LIST;
-                }
-                return instanceList.getItems();
+                // assist type inference
+                List<Instance> items = instanceList.isEmpty() ? Collections.emptyList() : instanceList.getItems();
+                return items;
             } catch (PrivilegedActionException e) {
                 logger.warn("Problem fetching instance list for zone {}", zoneId);
                 logger.debug("Full exception:", e);
-                return Collections.EMPTY_LIST;
+                // assist type inference
+                List<Instance> items = Collections.emptyList();
+                return items;
             }
         }).reduce(new ArrayList<>(), (a, b) -> {
             a.addAll(b);
diff --git a/test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java b/test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
index c040aae1292..39e1857f412 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
@@ -54,6 +54,6 @@ public class IndexSettingsModule extends AbstractModule {
                 .put(settings)
                 .build();
         IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(build).build();
-        return new IndexSettings(metaData, Settings.EMPTY, Collections.EMPTY_LIST);
+        return new IndexSettings(metaData, Settings.EMPTY, Collections.emptyList());
     }
 }
diff --git a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
index d4e2af5f30c..468b1877250 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
@@ -650,7 +650,7 @@ public class TestSearchContext extends SearchContext {
 
     @Override
     public Set<String> getHeaders() {
-        return Collections.EMPTY_SET;
+        return Collections.emptySet();
     }
 
     @Override
diff --git a/test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
index 9c2b8612d51..27a2e6fb22e 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -173,7 +173,7 @@ public class MockFSDirectoryService extends FsDirectoryService {
     private FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) {
         final IndexSettings indexSettings = indexStore.getIndexSettings();
         final IndexMetaData build = IndexMetaData.builder(indexSettings.getIndexMetaData()).settings(Settings.builder().put(indexSettings.getSettings()).put(IndexModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey())).build();
-        final IndexSettings newIndexSettings = new IndexSettings(build, indexSettings.getNodeSettings(), Collections.EMPTY_LIST);
+        final IndexSettings newIndexSettings = new IndexSettings(build, indexSettings.getNodeSettings(), Collections.emptyList());
         return new FsDirectoryService(newIndexSettings, indexStore, path);
     }
 
diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
index 476b89aa1a9..2363d98a113 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
@@ -178,6 +178,6 @@ public class CapturingTransport implements Transport {
 
     @Override
     public List<String> getLocalAddresses() {
-        return Collections.EMPTY_LIST;
+        return Collections.emptyList();
     }
 }

From fbe736c9bbd65b261c37b80a02927a83732c022a Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 2 Dec 2015 10:49:35 -0500
Subject: [PATCH 09/41] Cleaner type-inference assistance
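
The explicit type argument pins down the element type in contexts where
the compiler does not infer it through the conditional expression. A
minimal stand-alone sketch of the difference (the names here are
placeholders, not from this patch):

    import java.util.Collections;
    import java.util.List;

    class TypeWitnessDemo {
        static List<String> pick(boolean empty, List<String> items) {
            // Without the type witness, javac may infer List<Object> for
            // emptyList() inside the conditional; <String> resolves the
            // ambiguity without a throwaway local variable.
            return empty ? Collections.<String>emptyList() : items;
        }
    }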

---
 .../org/elasticsearch/cloud/gce/GceComputeServiceImpl.java  | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java
index 0171a7fe539..943b1cd0eae 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java
@@ -80,14 +80,12 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute
                     }
                 });
                 // assist type inference
-                List<Instance> items = instanceList.isEmpty() ? Collections.emptyList() : instanceList.getItems();
-                return items;
+                return instanceList.isEmpty() ? Collections.<Instance>emptyList() : instanceList.getItems();
             } catch (PrivilegedActionException e) {
                 logger.warn("Problem fetching instance list for zone {}", zoneId);
                 logger.debug("Full exception:", e);
                 // assist type inference
-                List<Instance> items = Collections.emptyList();
-                return items;
+                return Collections.<Instance>emptyList();
             }
         }).reduce(new ArrayList<>(), (a, b) -> {
             a.addAll(b);

From 199a05311ece371a883b2d3490b8a73417a34139 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 2 Dec 2015 10:55:25 -0500
Subject: [PATCH 10/41] Remove unchecked warnings rendered unnecessary
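
The raw Collections.EMPTY_MAP constant has the raw type Map, so
returning it from a generically typed method required
@SuppressWarnings("unchecked"); the Collections.emptyMap() factory
infers its type arguments from the target type, making the suppressions
dead weight. A hedged illustration (not from this patch):

    import java.util.Collections;
    import java.util.Map;

    class UncheckedDemo {
        @SuppressWarnings("unchecked") // EMPTY_MAP is the raw type Map
        static Map<String, Object> raw() {
            return Collections.EMPTY_MAP;
        }

        static Map<String, Object> typed() {
            return Collections.emptyMap(); // inferred; no warning
        }
    }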

---
 core/src/main/java/org/elasticsearch/script/Template.java | 2 --
 1 file changed, 2 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/script/Template.java b/core/src/main/java/org/elasticsearch/script/Template.java
index e488f0a01e5..4419d6f5093 100644
--- a/core/src/main/java/org/elasticsearch/script/Template.java
+++ b/core/src/main/java/org/elasticsearch/script/Template.java
@@ -119,12 +119,10 @@ public class Template extends Script {
         return template;
     }
 
-    @SuppressWarnings("unchecked")
     public static Script parse(Map<String, Object> config, boolean removeMatchedEntries, ParseFieldMatcher parseFieldMatcher) {
         return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(config, removeMatchedEntries, parseFieldMatcher);
     }
 
-    @SuppressWarnings("unchecked")
     public static Template parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
         return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher);
     }

From 3d8c32008eadd928b2e6ae1b8955ed9863b1028e Mon Sep 17 00:00:00 2001
From: Areek Zillur <areek.zillur@elasticsearch.com>
Date: Wed, 2 Dec 2015 13:01:33 -0500
Subject: [PATCH 11/41] [TEST] geo completions work with geo point fields

---
 .../ContextCompletionSuggestSearchIT.java     | 59 +++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java
index af336b0cc01..ae6ec51ac36 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java
@@ -562,6 +562,65 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase {
         assertSuggestions("foo", geoNeighbourPrefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5");
     }
 
+    public void testGeoField() throws Exception {
+
+        XContentBuilder mapping = jsonBuilder();
+        mapping.startObject();
+        mapping.startObject(TYPE);
+        mapping.startObject("properties");
+        mapping.startObject("pin");
+        mapping.field("type", "geo_point");
+        mapping.endObject();
+        mapping.startObject(FIELD);
+        mapping.field("type", "completion");
+        mapping.field("analyzer", "simple");
+
+        mapping.startArray("contexts");
+        mapping.startObject();
+        mapping.field("name", "st");
+        mapping.field("type", "geo");
+        mapping.field("path", "pin");
+        mapping.field("precision", 5);
+        mapping.endObject();
+        mapping.endArray();
+
+        mapping.endObject();
+        mapping.endObject();
+        mapping.endObject();
+        mapping.endObject();
+
+        assertAcked(prepareCreate(INDEX).addMapping(TYPE, mapping));
+        ensureYellow();
+
+        XContentBuilder source1 = jsonBuilder()
+                .startObject()
+                .latlon("pin", 52.529172, 13.407333)
+                .startObject(FIELD)
+                .array("input", "Hotel Amsterdam in Berlin")
+                .endObject()
+                .endObject();
+        client().prepareIndex(INDEX, TYPE, "1").setSource(source1).execute().actionGet();
+
+        XContentBuilder source2 = jsonBuilder()
+                .startObject()
+                .latlon("pin", 52.363389, 4.888695)
+                .startObject(FIELD)
+                .array("input", "Hotel Berlin in Amsterdam")
+                .endObject()
+                .endObject();
+        client().prepareIndex(INDEX, TYPE, "2").setSource(source2).execute().actionGet();
+
+        refresh();
+
+        String suggestionName = randomAsciiOfLength(10);
+        CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text("h").size(10)
+                .geoContexts("st", GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.52, 13.4)).build());
+        SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(context).get();
+
+        assertEquals(1, suggestResponse.getSuggest().size());
+        assertEquals("Hotel Amsterdam in Berlin", suggestResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string());
+    }
+
     public void assertSuggestions(String suggestionName, SuggestBuilder.SuggestionBuilder suggestBuilder, String... suggestions) {
         SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestBuilder
         ).execute().actionGet();

From f8026ed8fd80ece83f33dd7a5d7e66b371dc12a4 Mon Sep 17 00:00:00 2001
From: Ryan Ernst <ryan@iernst.net>
Date: Wed, 2 Dec 2015 11:03:56 -0800
Subject: [PATCH 12/41] Build: Fix dependency licenses check to correctly skip
 projects without dependencies
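
The check now validates both directions: a project with no dependencies
must not have a licenses directory, and a project with dependencies
must have one. A rough Java paraphrase of the corrected Groovy logic
below (the exception type is illustrative):

    import java.io.File;
    import java.util.List;

    class LicenseCheckSketch {
        static void check(File licensesDir, List<String> dependencies) {
            if (dependencies.isEmpty()) {
                if (licensesDir.exists()) {
                    throw new IllegalStateException(
                            "Licenses dir exists, but there are no dependencies");
                }
                return; // no dependencies to check
            }
            if (licensesDir.exists() == false) {
                throw new IllegalStateException(
                        "Licenses dir does not exist, but there are dependencies");
            }
            // ... continue with the per-dependency checks ...
        }
    }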

---
 .../precommit/DependencyLicensesTask.groovy       | 15 +++++++--------
 test-framework/build.gradle                       |  4 ++++
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
index d0466d7606d..0f6a6f006a0 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
@@ -106,16 +106,15 @@ public class DependencyLicensesTask extends DefaultTask {
 
     @TaskAction
     public void checkDependencies() {
-        // TODO REMOVE THIS DIRTY FIX FOR #15168
-        if (licensesDir.exists() == false) {
-            return
-        }
-        if (licensesDir.exists() == false && dependencies.isEmpty() == false) {
+        if (dependencies.isEmpty()) {
+            if (licensesDir.exists()) {
+                throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
+            }
+            return // no dependencies to check
+        } else if (licensesDir.exists() == false) {
             throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies")
         }
-        if (licensesDir.exists() && dependencies.isEmpty()) {
-            throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
-        }
+
 
         // order is the same for keys and values iteration since we use a linked hashmap
         List<String> mapped = new ArrayList<>(mappings.values())
diff --git a/test-framework/build.gradle b/test-framework/build.gradle
index 8252df9e3dc..a423f56c922 100644
--- a/test-framework/build.gradle
+++ b/test-framework/build.gradle
@@ -43,3 +43,7 @@ forbiddenApisMain {
   signaturesURLs = [PrecommitTasks.getResource('/forbidden/all-signatures.txt'),
                     PrecommitTasks.getResource('/forbidden/test-signatures.txt')]
 }
+
+// TODO: should we have licenses for our test deps?
+dependencyLicenses.enabled = false
+

From 7d25623b1f599bcae601b1ae63970b6a684afd5f Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Wed, 2 Dec 2015 21:59:20 +0100
Subject: [PATCH 13/41] Fix NPE when a segment with an empty cache gets closed.

This happens because the query cache still calls the onDocIdSetEviction
callback in this case, but with a number of entries equal to zero.
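
Reduced to a stand-alone sketch, the fix guards the stats cleanup so
that a zero-entry eviction never dereferences the lookup for a segment
that was never cached (the map below stands in for the cache's internal
stats2 bookkeeping):

    import java.util.HashMap;
    import java.util.Map;

    class EvictionGuardSketch {
        static final Map<Object, int[]> countsPerSegment = new HashMap<>();

        static void onDocIdSetEviction(Object segmentKey, int numEntries) {
            // numEntries == 0 means nothing was cached for this segment,
            // so there is no stats entry to update; skipping the block
            // avoids dereferencing the null lookup.
            if (numEntries > 0) {
                int[] count = countsPerSegment.get(segmentKey);
                count[0] -= numEntries;
                if (count[0] == 0) {
                    countsPerSegment.remove(segmentKey);
                }
            }
        }
    }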

Close #15043
---
 .../cache/query/IndicesQueryCache.java        |  29 +-
 .../cache/query/IndicesQueryCacheTests.java   | 308 ++++++++++++++++++
 2 files changed, 325 insertions(+), 12 deletions(-)
 create mode 100644 core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java

diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
index 148f7ba8bdb..23b4bc84c44 100644
--- a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
@@ -150,18 +150,23 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
             protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
                 assert Thread.holdsLock(this);
                 super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
-                // We can't use ShardCoreKeyMap here because its core closed
-                // listener is called before the listener of the cache which
-                // triggers this eviction. So instead we use use stats2 that
-                // we only evict when nothing is cached anymore on the segment
-                // instead of relying on close listeners
-                final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
-                final Stats shardStats = statsAndCount.stats;
-                shardStats.cacheSize -= numEntries;
-                shardStats.ramBytesUsed -= sumRamBytesUsed;
-                statsAndCount.count -= numEntries;
-                if (statsAndCount.count == 0) {
-                    stats2.remove(readerCoreKey);
+                // onDocIdSetEviction might sometimes be called with a number
+                // of entries equal to zero if the cache for the given segment
+                // was already empty when the close listener was called
+                if (numEntries > 0) {
+                    // We can't use ShardCoreKeyMap here because its core closed
+                    // listener is called before the listener of the cache which
+                    // triggers this eviction. So instead we use stats2, which
+                    // we only evict from when nothing is cached on the segment
+                    // anymore, instead of relying on close listeners.
+                    final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+                    final Stats shardStats = statsAndCount.stats;
+                    shardStats.cacheSize -= numEntries;
+                    shardStats.ramBytesUsed -= sumRamBytesUsed;
+                    statsAndCount.count -= numEntries;
+                    if (statsAndCount.count == 0) {
+                        stats2.remove(readerCoreKey);
+                    }
                 }
             }
 
diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
new file mode 100644
index 00000000000..8e5f4501fbf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
@@ -0,0 +1,308 @@
+package org.elasticsearch.indices.cache.query;
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.cache.query.QueryCacheStats;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESTestCase;
+
+public class IndicesQueryCacheTests extends ESTestCase {
+
+    private static class DummyQuery extends Query {
+
+        private final int id;
+
+        DummyQuery(int id) {
+            this.id = id;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            return super.equals(obj) && id == ((DummyQuery) obj).id;
+        }
+
+        @Override
+        public int hashCode() {
+            return 31 * super.hashCode() + id;
+        }
+
+        @Override
+        public String toString(String field) {
+            return "dummy";
+        }
+
+        @Override
+        public Weight createWeight(IndexSearcher searcher, boolean needsScores)
+                throws IOException {
+            return new ConstantScoreWeight(this) {
+                @Override
+                public Scorer scorer(LeafReaderContext context) throws IOException {
+                    return new ConstantScoreScorer(this, score(), DocIdSetIterator.all(context.reader().maxDoc()));
+                }
+            };
+        }
+
+    }
+
+    public void testBasics() throws IOException {
+        Directory dir = newDirectory();
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+        w.addDocument(new Document());
+        DirectoryReader r = DirectoryReader.open(w, false);
+        w.close();
+        ShardId shard = new ShardId(new Index("index"), 0);
+        r = ElasticsearchDirectoryReader.wrap(r, shard);
+        IndexSearcher s = new IndexSearcher(r);
+        s.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
+
+        Settings settings = Settings.builder()
+                .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT, 10)
+                .build();
+        IndicesQueryCache cache = new IndicesQueryCache(settings);
+        s.setQueryCache(cache);
+
+        QueryCacheStats stats = cache.getStats(shard);
+        assertEquals(0L, stats.getCacheSize());
+        assertEquals(0L, stats.getCacheCount());
+        assertEquals(0L, stats.getHitCount());
+        assertEquals(0L, stats.getMissCount());
+
+        assertEquals(1, s.count(new DummyQuery(0)));
+
+        stats = cache.getStats(shard);
+        assertEquals(1L, stats.getCacheSize());
+        assertEquals(1L, stats.getCacheCount());
+        assertEquals(0L, stats.getHitCount());
+        assertEquals(1L, stats.getMissCount());
+
+        for (int i = 1; i < 20; ++i) {
+            assertEquals(1, s.count(new DummyQuery(i)));
+        }
+
+        stats = cache.getStats(shard);
+        assertEquals(10L, stats.getCacheSize());
+        assertEquals(20L, stats.getCacheCount());
+        assertEquals(0L, stats.getHitCount());
+        assertEquals(20L, stats.getMissCount());
+
+        s.count(new DummyQuery(10));
+
+        stats = cache.getStats(shard);
+        assertEquals(10L, stats.getCacheSize());
+        assertEquals(20L, stats.getCacheCount());
+        assertEquals(1L, stats.getHitCount());
+        assertEquals(20L, stats.getMissCount());
+
+        IOUtils.close(r, dir);
+
+        // got emptied, but no changes to other metrics
+        stats = cache.getStats(shard);
+        assertEquals(0L, stats.getCacheSize());
+        assertEquals(20L, stats.getCacheCount());
+        assertEquals(1L, stats.getHitCount());
+        assertEquals(20L, stats.getMissCount());
+
+        cache.onClose(shard);
+
+        // forgot everything
+        stats = cache.getStats(shard);
+        assertEquals(0L, stats.getCacheSize());
+        assertEquals(0L, stats.getCacheCount());
+        assertEquals(0L, stats.getHitCount());
+        assertEquals(0L, stats.getMissCount());
+
+        cache.close(); // this triggers some assertions
+    }
+
+    public void testTwoShards() throws IOException {
+        Directory dir1 = newDirectory();
+        IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
+        w1.addDocument(new Document());
+        DirectoryReader r1 = DirectoryReader.open(w1, false);
+        w1.close();
+        ShardId shard1 = new ShardId(new Index("index"), 0);
+        r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
+        IndexSearcher s1 = new IndexSearcher(r1);
+        s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
+
+        Directory dir2 = newDirectory();
+        IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
+        w2.addDocument(new Document());
+        DirectoryReader r2 = DirectoryReader.open(w2, false);
+        w2.close();
+        ShardId shard2 = new ShardId(new Index("index"), 1);
+        r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
+        IndexSearcher s2 = new IndexSearcher(r2);
+        s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
+
+        Settings settings = Settings.builder()
+                .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT, 10)
+                .build();
+        IndicesQueryCache cache = new IndicesQueryCache(settings);
+        s1.setQueryCache(cache);
+        s2.setQueryCache(cache);
+
+        assertEquals(1, s1.count(new DummyQuery(0)));
+
+        QueryCacheStats stats1 = cache.getStats(shard1);
+        assertEquals(1L, stats1.getCacheSize());
+        assertEquals(1L, stats1.getCacheCount());
+        assertEquals(0L, stats1.getHitCount());
+        assertEquals(1L, stats1.getMissCount());
+
+        QueryCacheStats stats2 = cache.getStats(shard2);
+        assertEquals(0L, stats2.getCacheSize());
+        assertEquals(0L, stats2.getCacheCount());
+        assertEquals(0L, stats2.getHitCount());
+        assertEquals(0L, stats2.getMissCount());
+
+        assertEquals(1, s2.count(new DummyQuery(0)));
+
+        stats1 = cache.getStats(shard1);
+        assertEquals(1L, stats1.getCacheSize());
+        assertEquals(1L, stats1.getCacheCount());
+        assertEquals(0L, stats1.getHitCount());
+        assertEquals(1L, stats1.getMissCount());
+
+        stats2 = cache.getStats(shard2);
+        assertEquals(1L, stats2.getCacheSize());
+        assertEquals(1L, stats2.getCacheCount());
+        assertEquals(0L, stats2.getHitCount());
+        assertEquals(1L, stats2.getMissCount());
+
+        for (int i = 0; i < 20; ++i) {
+            assertEquals(1, s2.count(new DummyQuery(i)));
+        }
+
+        stats1 = cache.getStats(shard1);
+        assertEquals(0L, stats1.getCacheSize()); // evicted
+        assertEquals(1L, stats1.getCacheCount());
+        assertEquals(0L, stats1.getHitCount());
+        assertEquals(1L, stats1.getMissCount());
+
+        stats2 = cache.getStats(shard2);
+        assertEquals(10L, stats2.getCacheSize());
+        assertEquals(20L, stats2.getCacheCount());
+        assertEquals(1L, stats2.getHitCount());
+        assertEquals(20L, stats2.getMissCount());
+
+        IOUtils.close(r1, dir1);
+
+        // no changes
+        stats1 = cache.getStats(shard1);
+        assertEquals(0L, stats1.getCacheSize());
+        assertEquals(1L, stats1.getCacheCount());
+        assertEquals(0L, stats1.getHitCount());
+        assertEquals(1L, stats1.getMissCount());
+
+        stats2 = cache.getStats(shard2);
+        assertEquals(10L, stats2.getCacheSize());
+        assertEquals(20L, stats2.getCacheCount());
+        assertEquals(1L, stats2.getHitCount());
+        assertEquals(20L, stats2.getMissCount());
+
+        cache.onClose(shard1);
+
+        // forgot everything about shard1
+        stats1 = cache.getStats(shard1);
+        assertEquals(0L, stats1.getCacheSize());
+        assertEquals(0L, stats1.getCacheCount());
+        assertEquals(0L, stats1.getHitCount());
+        assertEquals(0L, stats1.getMissCount());
+
+        stats2 = cache.getStats(shard2);
+        assertEquals(10L, stats2.getCacheSize());
+        assertEquals(20L, stats2.getCacheCount());
+        assertEquals(1L, stats2.getHitCount());
+        assertEquals(20L, stats2.getMissCount());
+
+        IOUtils.close(r2, dir2);
+        cache.onClose(shard2);
+
+        // forgot everything about shard2
+        stats1 = cache.getStats(shard1);
+        assertEquals(0L, stats1.getCacheSize());
+        assertEquals(0L, stats1.getCacheCount());
+        assertEquals(0L, stats1.getHitCount());
+        assertEquals(0L, stats1.getMissCount());
+
+        stats2 = cache.getStats(shard2);
+        assertEquals(0L, stats2.getCacheSize());
+        assertEquals(0L, stats2.getCacheCount());
+        assertEquals(0L, stats2.getHitCount());
+        assertEquals(0L, stats2.getMissCount());
+
+        cache.close(); // this triggers some assertions
+    }
+
+    // Make sure the cache behaves correctly when a segment that is associated
+    // with an empty cache gets closed. In that particular case, the eviction
+    // callback is called with a number of evicted entries equal to 0
+    // see https://github.com/elastic/elasticsearch/issues/15043
+    public void testStatsOnEviction() throws IOException {
+        Directory dir1 = newDirectory();
+        IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
+        w1.addDocument(new Document());
+        DirectoryReader r1 = DirectoryReader.open(w1, false);
+        w1.close();
+        ShardId shard1 = new ShardId(new Index("index"), 0);
+        r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
+        IndexSearcher s1 = new IndexSearcher(r1);
+        s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
+
+        Directory dir2 = newDirectory();
+        IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
+        w2.addDocument(new Document());
+        DirectoryReader r2 = DirectoryReader.open(w2, false);
+        w2.close();
+        ShardId shard2 = new ShardId(new Index("index"), 1);
+        r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
+        IndexSearcher s2 = new IndexSearcher(r2);
+        s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
+
+        Settings settings = Settings.builder()
+                .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT, 10)
+                .build();
+        IndicesQueryCache cache = new IndicesQueryCache(settings);
+        s1.setQueryCache(cache);
+        s2.setQueryCache(cache);
+
+        assertEquals(1, s1.count(new DummyQuery(0)));
+
+        for (int i = 1; i <= 20; ++i) {
+            assertEquals(1, s2.count(new DummyQuery(i)));
+        }
+
+        QueryCacheStats stats1 = cache.getStats(shard1);
+        assertEquals(0L, stats1.getCacheSize());
+        assertEquals(1L, stats1.getCacheCount());
+
+        // this used to fail because we were evicting an empty cache on
+        // the segment from r1
+        IOUtils.close(r1, dir1);
+        cache.onClose(shard1);
+
+        IOUtils.close(r2, dir2);
+        cache.onClose(shard2);
+
+        cache.close(); // this triggers some assertions
+    }
+
+}

From ab45d69b16561985152f60d601f44b3004b7ad66 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 2 Dec 2015 16:29:04 -0500
Subject: [PATCH 14/41] Remove unused import in o.e.c.l.Releasables

---
 .../main/java/org/elasticsearch/common/lease/Releasables.java   | 2 --
 1 file changed, 2 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/lease/Releasables.java b/core/src/main/java/org/elasticsearch/common/lease/Releasables.java
index c91494a235d..e91bc5c0f71 100644
--- a/core/src/main/java/org/elasticsearch/common/lease/Releasables.java
+++ b/core/src/main/java/org/elasticsearch/common/lease/Releasables.java
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.common.lease;
 
-import org.elasticsearch.ElasticsearchException;
-
 import java.util.Arrays;
 
 /** Utility methods to work with {@link Releasable}s. */

From 5890c9a8136ca47493752e218b080042c99c3d53 Mon Sep 17 00:00:00 2001
From: Ryan <ryan@imacube.net>
Date: Wed, 2 Dec 2015 16:07:57 -0800
Subject: [PATCH 15/41] Update repository-s3.asciidoc

Documentation of AWS VPC public vs. private subnets and their effects on accessing S3.
---
 docs/plugins/repository-s3.asciidoc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc
index 4c9e93bc714..3441e67f05c 100644
--- a/docs/plugins/repository-s3.asciidoc
+++ b/docs/plugins/repository-s3.asciidoc
@@ -388,3 +388,8 @@ To run test:
 mvn -Dtests.aws=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test
 ----
 
+= AWS VPC Bandwidth Problems
+
+AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch instances reside in a private subnet in an AWS VPC, then all traffic to S3 will go through that VPC's NAT instance. If your VPC's NAT instance is a smaller instance size (e.g. a t1.micro) or is handling a high volume of network traffic, your bandwidth to S3 may be limited by that NAT instance's networking bandwidth limitations.
+
+Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance.

From 1f626d3458898f0e34cb6d9dbc861648c434ddc5 Mon Sep 17 00:00:00 2001
From: Robert Muir <rmuir@apache.org>
Date: Thu, 3 Dec 2015 01:45:06 -0500
Subject: [PATCH 16/41] forbidden third-party-signatures -> core-signatures

This is a relic from shading, where it was trickier to implement.
Third-party signatures are already covered by e.g. the test list;
there is no reason to separate them out.

Instead, we could have a third-party signatures file that does
something different... like keeping tabs on third-party libraries.
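
A hypothetical sketch of what such a file could look like, using the same
forbidden-apis signature syntax as core-signatures.txt below (the class and
method here are made up for illustration):

    @defaultMessage Use our wrapped client instead of calling the third-party API directly
    com.example.thirdparty.Client#connect(java.lang.String)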
---
 .../gradle/precommit/PrecommitTasks.groovy    |  4 +---
 .../resources/forbidden/core-signatures.txt   |  9 +++++++
 .../forbidden/third-party-signatures.txt      | 24 -------------------
 3 files changed, 10 insertions(+), 27 deletions(-)
 delete mode 100644 buildSrc/src/main/resources/forbidden/third-party-signatures.txt

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index 8b1a98139ba..04878d979e9 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -68,9 +68,7 @@ class PrecommitTasks {
         if (mainForbidden != null) {
             mainForbidden.configure {
                 bundledSignatures += 'jdk-system-out'
-                signaturesURLs += [
-                        getClass().getResource('/forbidden/core-signatures.txt'),
-                        getClass().getResource('/forbidden/third-party-signatures.txt')]
+                signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
             }
         }
         Task testForbidden = project.tasks.findByName('forbiddenApisTest')
diff --git a/buildSrc/src/main/resources/forbidden/core-signatures.txt b/buildSrc/src/main/resources/forbidden/core-signatures.txt
index 0f54946ea37..c6ab430595c 100644
--- a/buildSrc/src/main/resources/forbidden/core-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/core-signatures.txt
@@ -90,3 +90,12 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI)
 
 @defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
 org.apache.lucene.search.Query#setBoost(float)
+
+@defaultMessage Constructing a DateTime without a time zone is dangerous
+org.joda.time.DateTime#<init>()
+org.joda.time.DateTime#<init>(long)
+org.joda.time.DateTime#<init>(int, int, int, int, int)
+org.joda.time.DateTime#<init>(int, int, int, int, int, int)
+org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
+org.joda.time.DateTime#now()
+org.joda.time.DateTimeZone#getDefault()
diff --git a/buildSrc/src/main/resources/forbidden/third-party-signatures.txt b/buildSrc/src/main/resources/forbidden/third-party-signatures.txt
deleted file mode 100644
index 93e0eb043b5..00000000000
--- a/buildSrc/src/main/resources/forbidden/third-party-signatures.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to Elasticsearch under one or more contributor
-# license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright
-# ownership. Elasticsearch licenses this file to you under
-# the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance  with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on
-# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-# either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-
-@defaultMessage Constructing a DateTime without a time zone is dangerous
-org.joda.time.DateTime#<init>()
-org.joda.time.DateTime#<init>(long)
-org.joda.time.DateTime#<init>(int, int, int, int, int)
-org.joda.time.DateTime#<init>(int, int, int, int, int, int)
-org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
-org.joda.time.DateTime#now()
-org.joda.time.DateTimeZone#getDefault()

From 612ef4488edeafc8f0d0545e0c6a17f415edb8d3 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Thu, 3 Dec 2015 09:41:06 +0100
Subject: [PATCH 17/41] Add missing license header.

---
 .../cache/query/IndicesQueryCacheTests.java   | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
index 8e5f4501fbf..c25b20699aa 100644
--- a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
@@ -1,3 +1,22 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
 package org.elasticsearch.indices.cache.query;
 
 import java.io.IOException;

From 9930e6883d1365ed082effbb52ffb0c22410f0ab Mon Sep 17 00:00:00 2001
From: Boaz Leskes <b.leskes@gmail.com>
Date: Tue, 1 Dec 2015 15:05:58 +0100
Subject: [PATCH 18/41] Move IndicesService.canDeleteShardContent to use
 IndexSettings

Just a minor cleanup/simplification
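
A minimal sketch of the resulting call pattern, using the names that appear
in the diff below (metaData, nodeSettings and shardId stand in for values
the caller already has):

    IndexSettings indexSettings = new IndexSettings(metaData, nodeSettings, Collections.emptyList());
    if (indicesService.canDeleteShardContent(shardId, indexSettings)) {
        // the shard's contents on disk may be deleted
    }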

Closes #15059
Closes #15150
---
 .../elasticsearch/indices/IndicesService.java    | 16 ++++------------
 .../indices/store/IndicesStore.java              |  5 ++++-
 .../indices/IndicesServiceTests.java             |  7 ++++---
 3 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index ad1cd399c36..dead72aee8b 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -461,7 +461,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
 
     /**
      * This method deletes the shard contents on disk for the given shard ID. This method will fail if the shard deleting
-     * is prevented by {@link #canDeleteShardContent(org.elasticsearch.index.shard.ShardId, org.elasticsearch.cluster.metadata.IndexMetaData)}
+     * is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)}
      * or if the shard's lock cannot be acquired.
      *
      * On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove the index folder as well.
@@ -529,18 +529,10 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
      * </ul>
      *
      * @param shardId the shard to delete.
-     * @param metaData the shards index metadata. This is required to access the indexes settings etc.
+     * @param indexSettings the shard's relevant {@link IndexSettings}. This is required to access the index's settings etc.
      */
-    public boolean canDeleteShardContent(ShardId shardId, IndexMetaData metaData) {
-        // we need the metadata here since we have to build the complete settings
-        // to decide where the shard content lives. In the future we might even need more info here ie. for shadow replicas
-        // The plan was to make it harder to miss-use and ask for metadata instead of simple settings
-        assert shardId.getIndex().equals(metaData.getIndex());
-        final IndexSettings indexSettings = buildIndexSettings(metaData);
-        return canDeleteShardContent(shardId, indexSettings);
-    }
-
-    private boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
+    public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
+        assert shardId.getIndex().equals(indexSettings.getIndex().name());
         final IndexService indexService = this.indices.get(shardId.getIndex());
         if (indexSettings.isOnSharedFilesystem() == false) {
             if (indexService != null && nodeEnv.hasNodeFile()) {
diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
index 3e463413052..45f2f91b0be 100644
--- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
+++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
@@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.ShardId;
@@ -43,6 +44,7 @@ import org.elasticsearch.transport.*;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -97,11 +99,12 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
         }
 
         for (IndexRoutingTable indexRoutingTable : event.state().routingTable()) {
+            IndexSettings indexSettings = new IndexSettings(event.state().getMetaData().index(indexRoutingTable.index()), settings, Collections.emptyList());
             // Note, closed indices will not have any routing information, so won't be deleted
             for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                 if (shardCanBeDeleted(event.state(), indexShardRoutingTable)) {
                     ShardId shardId = indexShardRoutingTable.shardId();
-                    if (indicesService.canDeleteShardContent(shardId, event.state().getMetaData().index(shardId.getIndex()))) {
+                    if (indicesService.canDeleteShardContent(shardId, indexSettings)) {
                         deleteShardIfExistElseWhere(event.state(), indexShardRoutingTable);
                     }
                 }
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
index d4051148029..522ebfb0f3b 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java
@@ -72,12 +72,13 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
         IndicesService indicesService = getIndicesService();
         IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(
                 1).build();
-        assertFalse("no shard location", indicesService.canDeleteShardContent(new ShardId("test", 0), meta));
+        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", meta.getSettings());
+        assertFalse("no shard location", indicesService.canDeleteShardContent(new ShardId("test", 0), indexSettings));
         IndexService test = createIndex("test");
         assertTrue(test.hasShard(0));
-        assertFalse("shard is allocated", indicesService.canDeleteShardContent(new ShardId("test", 0), meta));
+        assertFalse("shard is allocated", indicesService.canDeleteShardContent(new ShardId("test", 0), indexSettings));
         test.removeShard(0, "boom");
-        assertTrue("shard is removed", indicesService.canDeleteShardContent(new ShardId("test", 0), meta));
+        assertTrue("shard is removed", indicesService.canDeleteShardContent(new ShardId("test", 0), indexSettings));
     }
 
     public void testDeleteIndexStore() throws Exception {

From c5301e853a6f686903e221f2cf88e23da9225f81 Mon Sep 17 00:00:00 2001
From: Boaz Leskes <b.leskes@gmail.com>
Date: Thu, 3 Dec 2015 10:00:48 +0100
Subject: [PATCH 19/41] Reduce the number of shards in
 RecoveryWhileUnderLoadIT.testRecoverWhileRelocating

---
 .../org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
index da6e5ed934c..7095639eafc 100644
--- a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
@@ -223,7 +223,7 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
     }
 
     public void testRecoverWhileRelocating() throws Exception {
-        final int numShards = between(2, 10);
+        final int numShards = between(2, 5);
         final int numReplicas = 0;
         logger.info("--> creating test index ...");
         int allowNodes = 2;

From d0fbd9d1c4977b785c060cb5326d444f3d62c94e Mon Sep 17 00:00:00 2001
From: Martijn van Groningen <martijn.v.groningen@gmail.com>
Date: Wed, 2 Dec 2015 15:31:10 +0100
Subject: [PATCH 20/41] index template: Disallow index template pattern to be
 the same as an alias name

This can cause index creation to fail down the line.
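
A sketch of a request that is now rejected, mirroring the new test below
(the exception message is the one thrown by the added validation):

    PutRequest request = new PutRequest("api", "foobar_template");
    request.template("foobar");
    request.aliases(Collections.singleton(new Alias("foobar")));
    // putTemplate now fails with:
    // IllegalArgumentException: Alias [foobar] cannot be the same as the template pattern [foobar]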
---
 .../MetaDataIndexTemplateService.java         |  3 +++
 .../MetaDataIndexTemplateServiceTests.java    | 22 ++++++++++++++-----
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
index 3d7d19b27b9..790cb99c64b 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
@@ -217,6 +217,9 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
         for (Alias alias : request.aliases) {
             //we validate the alias only partially, as we don't know yet to which index it will get applied
             aliasValidator.validateAliasStandalone(alias);
+            if (request.template.equals(alias.name())) {
+                throw new IllegalArgumentException("Alias [" + alias.name() + "] cannot be the same as the template pattern [" + request.template + "]");
+            }
         }
     }
 
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
index f983d885124..c642bdb1e79 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java
@@ -20,6 +20,8 @@
 package org.elasticsearch.action.admin.indices.template.put;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.cluster.metadata.AliasValidator;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
 import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
@@ -28,13 +30,10 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.indices.InvalidIndexTemplateException;
 import org.elasticsearch.test.ESTestCase;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 
 import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
 
 public class MetaDataIndexTemplateServiceTests extends ESTestCase {
@@ -68,6 +67,17 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
         assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
     }
 
+    public void testIndexTemplateWithAliasNameEqualToTemplatePattern() {
+        PutRequest request = new PutRequest("api", "foobar_template");
+        request.template("foobar");
+        request.aliases(Collections.singleton(new Alias("foobar")));
+
+        List<Throwable> errors = putTemplate(request);
+        assertThat(errors.size(), equalTo(1));
+        assertThat(errors.get(0), instanceOf(IllegalArgumentException.class));
+        assertThat(errors.get(0).getMessage(), equalTo("Alias [foobar] cannot be the same as the template pattern [foobar]"));
+    }
+
     private static List<Throwable> putTemplate(PutRequest request) {
         MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(
                 Settings.EMPTY,
@@ -79,7 +89,7 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
                 new HashSet<>(),
                 null,
                 null);
-        MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, null);
+        MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, new AliasValidator(Settings.EMPTY));
 
         final List<Throwable> throwables = new ArrayList<>();
         service.putTemplate(request, new MetaDataIndexTemplateService.PutListener() {

From e84b61358f35d622c1da53330f5140b011053dbb Mon Sep 17 00:00:00 2001
From: Clinton Gormley <clint@traveljury.com>
Date: Thu, 3 Dec 2015 14:24:19 +0100
Subject: [PATCH 21/41] Fix version order for breaking changes docs

---
 docs/reference/migration/index.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc
index d200cab3f0b..56c000c3d9a 100644
--- a/docs/reference/migration/index.asciidoc
+++ b/docs/reference/migration/index.asciidoc
@@ -18,10 +18,10 @@ See <<setup-upgrade>> for more info.
 --
 include::migrate_3_0.asciidoc[]
 
-include::migrate_2_1.asciidoc[]
-
 include::migrate_2_2.asciidoc[]
 
+include::migrate_2_1.asciidoc[]
+
 include::migrate_2_0.asciidoc[]
 
 include::migrate_1_6.asciidoc[]

From 310e98f51d14a18b37d5a9901cb304fcf70dc15f Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Thu, 3 Dec 2015 08:59:56 -0500
Subject: [PATCH 22/41] Update stale Javadoc in
 MetaDataMappingService#executeRefresh

This commit updates a stale Javadoc on
MetaDataMappingService#executeRefresh. Previously this method handled
refresh and update tasks. Update tasks have been removed and the method
was renamed, but the Javadoc was not updated to reflect this.
---
 .../elasticsearch/cluster/metadata/MetaDataMappingService.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 854d55020dd..5dd638965e3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -85,7 +85,7 @@ public class MetaDataMappingService extends AbstractComponent {
     }
 
     /**
-     * Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much
+     * Batch method to apply all the queued refresh operations. The idea is to try and batch as much
      * as possible so we won't create the same index all the time for example for the updates on the same mapping
      * and generate a single cluster change event out of all of those.
      */

From d1fd1fb908d12a7b83969fe788987ed44db07ec4 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Thu, 3 Dec 2015 09:02:04 -0500
Subject: [PATCH 23/41] Remove unnecessary early-out in
 MetaDataMappingService#executeRefresh

This commit removes a simple early-out check in
MetaDataMappingService#executeRefresh. The early-out is unnecessary
because the cluster state task execution framework will not invoke
ClusterStateTaskExecutor#execute if the list of tasks is empty.
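
A sketch of the contract being relied on, assuming the framework behavior
described above (RefreshTask and executeRefresh as in the diff below):

    ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
        assert allTasks.isEmpty() == false; // the framework never passes an empty batch
        // ... proceed straight to grouping the refresh tasks per index ...
    }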
---
 .../cluster/metadata/MetaDataMappingService.java              | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 5dd638965e3..4eac6594749 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -90,10 +90,6 @@ public class MetaDataMappingService extends AbstractComponent {
      * and generate a single cluster change event out of all of those.
      */
     ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
-        if (allTasks.isEmpty()) {
-            return currentState;
-        }
-
         // break down to tasks per index, so we can optimize the on demand index service creation
         // to only happen for the duration of a single index processing of its respective events
         Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();

From 3ff91baedf13711ee3609e2c93f922f85046734d Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Thu, 3 Dec 2015 16:06:51 +0100
Subject: [PATCH 24/41] Simplify MetaDataMappingService.

Now that we create all types on indices that receive a mapping update, the
creation of temporary indices can be simplified.
---
 .../metadata/MetaDataMappingService.java      | 21 +++++++------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 854d55020dd..5300e69ad95 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -212,21 +212,14 @@ public class MetaDataMappingService extends AbstractComponent {
                 for (PutMappingClusterStateUpdateRequest request : tasks) {
                     // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
                     for (String index : request.indices()) {
-                        if (currentState.metaData().hasIndex(index)) {
+                        final IndexMetaData indexMetaData = currentState.metaData().index(index);
+                        if (indexMetaData != null  && indicesService.hasIndex(index) == false) {
                             // if we don't have the index, we will throw exceptions later;
-                            if (indicesService.hasIndex(index) == false || indicesToClose.contains(index)) {
-                                final IndexMetaData indexMetaData = currentState.metaData().index(index);
-                                IndexService indexService;
-                                if (indicesService.hasIndex(index) == false) {
-                                    indicesToClose.add(index);
-                                    indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
-                                    // add mappings for all types, we need them for cross-type validation
-                                    for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                                        indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
-                                    }
-                                } else {
-                                    indexService = indicesService.indexService(index);
-                                }
+                            indicesToClose.add(index);
+                            IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
+                            // add mappings for all types, we need them for cross-type validation
+                            for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
+                                indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
                             }
                         }
                     }

From 94027f146118fb74f75c673361f714412854026f Mon Sep 17 00:00:00 2001
From: Ryan <ryan@imacube.net>
Date: Thu, 3 Dec 2015 09:27:20 -0800
Subject: [PATCH 25/41] Made requested changes

Moved the AWS VPC bandwidth section to before the testing section and made the requested formatting changes.
---
 docs/plugins/repository-s3.asciidoc | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc
index 3441e67f05c..16505885446 100644
--- a/docs/plugins/repository-s3.asciidoc
+++ b/docs/plugins/repository-s3.asciidoc
@@ -343,6 +343,14 @@ to your URL provider. Note that this setting will be used for all S3 repositorie
 Different `endpoint`, `region` and `protocol` settings can be set on a per-repository basis
 See <<repository-s3-repository>> for details.
 
+[[repository-s3-aws-vpc]]
+[float]
+==== AWS VPC Bandwidth Settings
+
+AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch instances reside in a private subnet in an AWS VPC, then all traffic to S3 will go through that VPC's NAT instance. If your VPC's NAT instance is a smaller instance size (e.g. a t1.micro) or is handling a high volume of network traffic, your bandwidth to S3 may be limited by that NAT instance's networking bandwidth limitations.
+
+Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance.
+
 [[repository-s3-testing]]
 ==== Testing AWS
 
@@ -387,9 +395,3 @@ To run test:
 ----
 mvn -Dtests.aws=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test
 ----
-
-= AWS VPC Bandwidth Problems
-
-AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch instances reside in a private subnet in an AWS VPC, then all traffic to S3 will go through that VPC's NAT instance. If your VPC's NAT instance is a smaller instance size (e.g. a t1.micro) or is handling a high volume of network traffic, your bandwidth to S3 may be limited by that NAT instance's networking bandwidth limitations.
-
-Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance.

From 73a2d3085ab4133e7117c73e363c14af19f44cd0 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Thu, 3 Dec 2015 13:41:34 -0500
Subject: [PATCH 26/41] Rename variable for clarity in
 ShardFailedClusterStateHandler#execute

---
 .../cluster/action/shard/ShardStateAction.java            | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index da014e15b6b..a01f601ba12 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -152,7 +152,7 @@ public class ShardStateAction extends AbstractComponent {
     class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
         @Override
         public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
-            BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
+            BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
             List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
             for (ShardRoutingEntry task : tasks) {
                 task.processed = true;
@@ -164,11 +164,11 @@ public class ShardStateAction extends AbstractComponent {
                 if (result.changed()) {
                     maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
                 }
-                builder.successes(tasks);
+                batchResultBuilder.successes(tasks);
             } catch (Throwable t) {
-                builder.failures(tasks, t);
+                batchResultBuilder.failures(tasks, t);
             }
-            return builder.build(maybeUpdatedState);
+            return batchResultBuilder.build(maybeUpdatedState);
         }
 
         @Override

From b70d97f36b0d4e9905a98f2bb98a5fe6ee668976 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Thu, 3 Dec 2015 13:42:05 -0500
Subject: [PATCH 27/41] Remove unnecessary method in AllocationService

---
 .../cluster/routing/allocation/AllocationService.java       | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index e19d51981a6..f819d6fde0a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -98,11 +98,7 @@ public class AllocationService extends AbstractComponent {
     }
 
     public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
-        return applyFailedShard(clusterState, new FailedRerouteAllocation.FailedShard(failedShard, null, null));
-    }
-
-    public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, FailedRerouteAllocation.FailedShard failedShard) {
-        return applyFailedShards(clusterState, Collections.singletonList(failedShard));
+        return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
     }
 
     /**

From 99eac0e7d78d54340dbb2e89432a9d50b3b41547 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Wed, 25 Nov 2015 11:31:43 -0500
Subject: [PATCH 28/41] Use general cluster state batching mechanism for shard
 started

This commit modifies the handling of shard started cluster state updates
to use the general cluster state batching mechanism. An advantage of
this approach is we now get correct per-listener notification on
failures.
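
The batched submission shape, in isolation (names as in the diff below;
source stands in for the description string built there, and the handler
implements both ClusterStateTaskExecutor and ClusterStateTaskListener,
which is why it is passed twice):

    clusterService.submitStateUpdateTask(
            source,                                        // description of the task
            shardRoutingEntry,                             // the task itself
            ClusterStateTaskConfig.build(Priority.URGENT), // per-task config
            shardStartedClusterStateHandler,               // executor: processes all pending tasks as one batch
            shardStartedClusterStateHandler);              // listener: per-task notification on failure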
---
 .../action/shard/ShardStateAction.java        | 86 ++++++++-----------
 .../routing/allocation/AllocationService.java | 16 ++++
 2 files changed, 50 insertions(+), 52 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index a01f601ba12..b4048407841 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -36,14 +36,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.BlockingQueue;
 
 import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
 
@@ -60,8 +58,6 @@ public class ShardStateAction extends AbstractComponent {
     private final AllocationService allocationService;
     private final RoutingService routingService;
 
-    private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
-
     @Inject
     public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
                             AllocationService allocationService, RoutingService routingService) {
@@ -185,60 +181,46 @@ public class ShardStateAction extends AbstractComponent {
         }
     }
 
+    private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
+            new ShardStartedClusterStateHandler();
+
     private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
         logger.debug("received shard started for {}", shardRoutingEntry);
-        // buffer shard started requests, and the state update tasks will simply drain it
-        // this is to optimize the number of "started" events we generate, and batch them
-        // possibly, we can do time based batching as well, but usually, we would want to
-        // process started events as fast as possible, to make shards available
-        startedShardsQueue.add(shardRoutingEntry);
 
-        clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
-                new ClusterStateUpdateTask() {
-                    @Override
-                    public Priority priority() {
-                        return Priority.URGENT;
+        clusterService.submitStateUpdateTask(
+                "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
+                shardRoutingEntry,
+                ClusterStateTaskConfig.build(Priority.URGENT),
+                shardStartedClusterStateHandler,
+                shardStartedClusterStateHandler);
+    }
+
+    class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
+        @Override
+        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
+            BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
+            ClusterState accumulator = ClusterState.builder(currentState).build();
+            for (ShardRoutingEntry task : tasks) {
+                task.processed = true;
+                try {
+                    RoutingAllocation.Result result =
+                            allocationService.applyStartedShard(currentState, task.shardRouting, true);
+                    builder.success(task);
+                    if (result.changed()) {
+                        accumulator = ClusterState.builder(accumulator).routingResult(result).build();
                     }
+                } catch (Throwable t) {
+                    builder.failure(task, t);
+                }
+            }
 
-                    @Override
-                    public ClusterState execute(ClusterState currentState) {
+            return builder.build(accumulator);
+        }
 
-                        if (shardRoutingEntry.processed) {
-                            return currentState;
-                        }
-
-                        List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
-                        startedShardsQueue.drainTo(shardRoutingEntries);
-
-                        // nothing to process (a previous event has processed it already)
-                        if (shardRoutingEntries.isEmpty()) {
-                            return currentState;
-                        }
-
-                        List<ShardRouting> shardRoutingToBeApplied = new ArrayList<>(shardRoutingEntries.size());
-
-                        // mark all entries as processed
-                        for (ShardRoutingEntry entry : shardRoutingEntries) {
-                            entry.processed = true;
-                            shardRoutingToBeApplied.add(entry.shardRouting);
-                        }
-
-                        if (shardRoutingToBeApplied.isEmpty()) {
-                            return currentState;
-                        }
-
-                        RoutingAllocation.Result routingResult = allocationService.applyStartedShards(currentState, shardRoutingToBeApplied, true);
-                        if (!routingResult.changed()) {
-                            return currentState;
-                        }
-                        return ClusterState.builder(currentState).routingResult(routingResult).build();
-                    }
-
-                    @Override
-                    public void onFailure(String source, Throwable t) {
-                        logger.error("unexpected failure during [{}]", t, source);
-                    }
-                });
+        @Override
+        public void onFailure(String source, Throwable t) {
+            logger.error("unexpected failure during [{}]", t, source);
+        }
     }
 
     private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index f819d6fde0a..af17c839582 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -63,6 +63,22 @@ public class AllocationService extends AbstractComponent {
         this.clusterInfoService = clusterInfoService;
     }
 
+    /**
+     * Applies the started shard. Note, the same shard can be applied
+     * several times within this method. If the same instance of the
+     * routing table is returned, then no change has been made.
+     * @param clusterState the cluster state
+     * @param startedShard the shard to start
+     * @param withReroute whether or not to reroute the resulting allocation
+     * @return the resulting {@link RoutingAllocation.Result}
+     */
+    public RoutingAllocation.Result applyStartedShard(
+            ClusterState clusterState,
+            ShardRouting startedShard,
+            boolean withReroute) {
+        return applyStartedShards(clusterState, Collections.singletonList(startedShard), withReroute);
+    }
+
     /**
      * Applies the started shards. Note, shards can be called several times within this method.
      * <p>

From 928d53a884b5bfc6248b6db97198da25666091d5 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Tue, 1 Dec 2015 11:36:29 -0500
Subject: [PATCH 29/41] Apply shard starts in a single batch

---
 .../action/shard/ShardStateAction.java        | 24 ++++++++++---------
 .../routing/allocation/AllocationService.java | 16 -------------
 2 files changed, 13 insertions(+), 27 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index b4048407841..a74b2ca5ed8 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -199,22 +199,24 @@ public class ShardStateAction extends AbstractComponent {
         @Override
         public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
             BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
-            ClusterState accumulator = ClusterState.builder(currentState).build();
+            List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
             for (ShardRoutingEntry task : tasks) {
                 task.processed = true;
-                try {
-                    RoutingAllocation.Result result =
-                            allocationService.applyStartedShard(currentState, task.shardRouting, true);
-                    builder.success(task);
-                    if (result.changed()) {
-                        accumulator = ClusterState.builder(accumulator).routingResult(result).build();
-                    }
-                } catch (Throwable t) {
-                    builder.failure(task, t);
+                shardRoutingsToBeApplied.add(task.shardRouting);
+            }
+            ClusterState maybeUpdatedState = currentState;
+            try {
+                RoutingAllocation.Result result =
+                    allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
+                if (result.changed()) {
+                    maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
                 }
+                builder.successes(tasks);
+            } catch (Throwable t) {
+                builder.failures(tasks, t);
             }
 
-            return builder.build(accumulator);
+            return builder.build(maybeUpdatedState);
         }
 
         @Override
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index af17c839582..f819d6fde0a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -63,22 +63,6 @@ public class AllocationService extends AbstractComponent {
         this.clusterInfoService = clusterInfoService;
     }
 
-    /**
-     * Applies the started shard. Note, the same shard can be applied
-     * several times within this method. If the same instance of the
-     * routing table is returned, then no change has been made.
-     * @param clusterState the cluster state
-     * @param startedShard the shard to start
-     * @param withReroute whether or not to reroute the resulting allocation
-     * @return the resulting {@link RoutingAllocation.Result}
-     */
-    public RoutingAllocation.Result applyStartedShard(
-            ClusterState clusterState,
-            ShardRouting startedShard,
-            boolean withReroute) {
-        return applyStartedShards(clusterState, Collections.singletonList(startedShard), withReroute);
-    }
-
     /**
      * Applies the started shards. Note, shards can be called several times within this method.
      * <p>

From b58d82f66c8f8f6490268122fd3e8dbf3c6b08ae Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Thu, 3 Dec 2015 14:08:35 -0500
Subject: [PATCH 30/41] Remove obsolete flag in
 ShardStateAction$ShardRoutingEntry

---
 .../elasticsearch/cluster/action/shard/ShardStateAction.java  | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index a74b2ca5ed8..d09df094a68 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -151,7 +151,6 @@ public class ShardStateAction extends AbstractComponent {
             BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
             List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
             for (ShardRoutingEntry task : tasks) {
-                task.processed = true;
                 shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
             }
             ClusterState maybeUpdatedState = currentState;
@@ -201,7 +200,6 @@ public class ShardStateAction extends AbstractComponent {
             BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
             List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
             for (ShardRoutingEntry task : tasks) {
-                task.processed = true;
                 shardRoutingsToBeApplied.add(task.shardRouting);
             }
             ClusterState maybeUpdatedState = currentState;
@@ -250,8 +248,6 @@ public class ShardStateAction extends AbstractComponent {
         String message;
         Throwable failure;
 
-        volatile boolean processed; // state field, no need to serialize
-
         public ShardRoutingEntry() {
         }
 

From de0e1f5e2f5cc737e643a29719b7df8582c62619 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Thu, 3 Dec 2015 14:31:41 -0500
Subject: [PATCH 31/41] Avoid trace logging allocations in
 TransportBroadcastByNodeAction

This commit wraps the trace logging statements in
TransportBroadcastByNodeAction in trace enabled checks to avoid
unnecessarily allocating objects.

The most egregious offenders were the two trace logging statements in
BroadcastByNodeTransportRequestHandler#onShardOperation. Aside from the
usual object allocations that occur when invoking ESLogger#trace (the
allocated object array for the varargs Object... parameter), these two
logging statements were invoking ShardRouting#shortSummary generating a
bunch of char arrays and Strings (from the StringBuilder, and so a bunch
of array copies as well). In a scenario where there are a lot of shards
and this method is being invoked frequently (e.g., constantly hitting
the _stats endpoint), these two unprotected trace logging statements
were generating a lot of unnecessary allocations.
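
The pattern applied throughout, reduced to a sketch:

    // unguarded: allocates the varargs Object[] and eagerly builds the
    // shardRouting.shortSummary() string even when trace logging is off
    logger.trace("[{}]  executing operation for shard [{}]", actionName, shardRouting.shortSummary());

    // guarded: nothing is allocated unless tracing is actually enabled
    if (logger.isTraceEnabled()) {
        logger.trace("[{}]  executing operation for shard [{}]", actionName, shardRouting.shortSummary());
    }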
---
 .../node/TransportBroadcastByNodeAction.java  | 20 ++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index bc78f13433f..8bcba8ad544 100644
--- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -223,7 +223,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                 throw requestBlockException;
             }
 
-            logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
+            if (logger.isTraceEnabled()) {
+                logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
+            }
             ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
             nodeIds = new HashMap<>();
 
@@ -300,7 +302,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
         }
 
         protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
-            logger.trace("received response for [{}] from node [{}]", actionName, node.id());
+            if (logger.isTraceEnabled()) {
+                logger.trace("received response for [{}] from node [{}]", actionName, node.id());
+            }
 
             // this is defensive to protect against the possibility of double invocation
             // the current implementation of TransportService#sendRequest guards against this
@@ -351,7 +355,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
         public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception {
             List<ShardRouting> shards = request.getShards();
             final int totalShards = shards.size();
-            logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
+            if (logger.isTraceEnabled()) {
+                logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
+            }
             final Object[] shardResultOrExceptions = new Object[totalShards];
 
             int shardIndex = -1;
@@ -375,10 +381,14 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
 
         private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) {
             try {
-                logger.trace("[{}]  executing operation for shard [{}]", actionName, shardRouting.shortSummary());
+                if (logger.isTraceEnabled()) {
+                    logger.trace("[{}]  executing operation for shard [{}]", actionName, shardRouting.shortSummary());
+                }
                 ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
                 shardResults[shardIndex] = result;
-                logger.trace("[{}]  completed operation for shard [{}]", actionName, shardRouting.shortSummary());
+                if (logger.isTraceEnabled()) {
+                    logger.trace("[{}]  completed operation for shard [{}]", actionName, shardRouting.shortSummary());
+                }
             } catch (Throwable t) {
                 BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
                 e.setIndex(shardRouting.getIndex());

From 17e6195fd605be8f34d3af857aeb6c7d0ffea697 Mon Sep 17 00:00:00 2001
From: Ryan Ernst <ryan@iernst.net>
Date: Thu, 3 Dec 2015 15:20:41 -0800
Subject: [PATCH 32/41] Build: Fix updateShas to not barf on disabled license
 checks and even compile correctly

These were just the result of not testing properly after refactoring.

closes #15224
---
 .../gradle/precommit/DependencyLicensesTask.groovy           | 2 +-
 .../org/elasticsearch/gradle/precommit/UpdateShasTask.groovy | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
index 0f6a6f006a0..e2f10100269 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy
@@ -69,7 +69,7 @@ import java.util.regex.Pattern
  * </pre>
  */
 public class DependencyLicensesTask extends DefaultTask {
-    private static final String SHA_EXTENSION = '.sha1'
+    static final String SHA_EXTENSION = '.sha1'
 
     // TODO: we should be able to default this to eg compile deps, but we need to move the licenses
     // check from distribution to core (ie this should only be run on java projects)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy
index d0c73e6ad76..4a174688aa1 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy
@@ -35,6 +35,7 @@ public class UpdateShasTask extends DefaultTask {
 
     public UpdateShasTask() {
         description = 'Updates the sha files for the dependencyLicenses check'
+        onlyIf { parentTask.licensesDir.exists() }
     }
 
     @TaskAction
@@ -42,13 +43,13 @@ public class UpdateShasTask extends DefaultTask {
         Set<File> shaFiles = new HashSet<File>()
         parentTask.licensesDir.eachFile {
             String name = it.getName()
-            if (name.endsWith(SHA_EXTENSION)) {
+            if (name.endsWith(DependencyLicensesTask.SHA_EXTENSION)) {
                 shaFiles.add(it)
             }
         }
         for (File dependency : parentTask.dependencies) {
             String jarName = dependency.getName()
-            File shaFile = new File(parentTask.licensesDir, jarName + SHA_EXTENSION)
+            File shaFile = new File(parentTask.licensesDir, jarName + DependencyLicensesTask.SHA_EXTENSION)
             if (shaFile.exists() == false) {
                 logger.lifecycle("Adding sha for ${jarName}")
                 String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()

From 47c5da523fb9cc9ac53bba61108a5e12f80dfeaa Mon Sep 17 00:00:00 2001
From: Gao Yingkai <popol.subscribe@gmail.com>
Date: Thu, 3 Dec 2015 20:24:20 -0500
Subject: [PATCH 33/41] Update scripting.asciidoc

Fixed a minor typo in the native script request example.
---
 docs/reference/modules/scripting.asciidoc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc
index 982097cdf1c..aea7846202e 100644
--- a/docs/reference/modules/scripting.asciidoc
+++ b/docs/reference/modules/scripting.asciidoc
@@ -425,8 +425,10 @@ curl -XPOST localhost:9200/_search -d '{
       "functions": [
         {
           "script_score": {
-            "id": "my_script",
-            "lang" : "native"
+            "script": {
+                "id": "my_script",
+                "lang" : "native"
+            }
           }
         }
       ]

From f78c732ca912233318c1c4a66e508a981020283e Mon Sep 17 00:00:00 2001
From: Ryan Ernst <ryan@iernst.net>
Date: Thu, 3 Dec 2015 16:59:35 -0800
Subject: [PATCH 34/41] Cli tools: Use toString instead of getMessage for
 exceptions

When not in debug mode, we currently only print the message of an
exception. However, this is not usually useful without knowing what the
exception type was. This change makes cli tools use toString() on the
exception so we get the type + message.
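
For example (illustrative, not part of this change):

    Throwable t = new java.nio.file.NoSuchFileException("/path/to/some/file");
    t.getMessage();  // "/path/to/some/file" -- the exception type is lost
    t.toString();    // "java.nio.file.NoSuchFileException: /path/to/some/file"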
---
 .../elasticsearch/common/cli/Terminal.java    |  2 +-
 .../common/cli/TerminalTests.java             | 23 ++++++++++++++++++-
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java b/core/src/main/java/org/elasticsearch/common/cli/Terminal.java
index 583eb9cc3a9..9523115b024 100644
--- a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java
+++ b/core/src/main/java/org/elasticsearch/common/cli/Terminal.java
@@ -116,7 +116,7 @@ public abstract class Terminal {
     }
 
     public void printError(Throwable t) {
-        printError("%s", t.getMessage());
+        printError("%s", t.toString());
         if (isDebugEnabled) {
             printStackTrace(t);
         }
diff --git a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java b/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java
index f49e1f3a16e..dcbbc1ed337 100644
--- a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java
+++ b/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java
@@ -19,6 +19,9 @@
 
 package org.elasticsearch.common.cli;
 
+import java.nio.file.NoSuchFileException;
+import java.util.List;
+
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
@@ -44,10 +47,28 @@ public class TerminalTests extends CliToolTestCase {
         assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
     }
 
+    public void testError() throws Exception {
+        try {
+            // actually throw so we have a stacktrace
+            throw new NoSuchFileException("/path/to/some/file");
+        } catch (NoSuchFileException e) {
+            CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
+            terminal.printError(e);
+            List<String> output = terminal.getTerminalOutput();
+            assertFalse(output.isEmpty());
+            assertTrue(output.get(0), output.get(0).contains("NoSuchFileException")); // exception class
+            assertTrue(output.get(0), output.get(0).contains("/path/to/some/file")); // message
+            assertEquals(1, output.size());
+
+            // TODO: we should test stack trace is printed in debug mode...except debug is a sysprop instead of
+            // a command line param...maybe it should be VERBOSE instead of a separate debug prop?
+        }
+    }
+
     private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {
         logTerminal.print(verbosity, text);
         assertThat(logTerminal.getTerminalOutput(), hasSize(1));
-        assertThat(logTerminal.getTerminalOutput(), hasItem(is("text")));
+        assertThat(logTerminal.getTerminalOutput(), hasItem(text));
         logTerminal.terminalOutput.clear();
     }
 

From 0be141e021494c96b9be8c97c47f964fa36cce4b Mon Sep 17 00:00:00 2001
From: Ryan Ernst <ryan@iernst.net>
Date: Thu, 3 Dec 2015 19:53:50 -0800
Subject: [PATCH 35/41] Build: Add .settings for buildSrc on gradle eclipse

Eclipse settings are added to all projects when running gradle eclipse,
but buildSrc is its own special project that is not covered by the
allprojects blocks. This adds the eclipse settings to buildSrc as well.
---
 buildSrc/build.gradle | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 0fa713fe371..e46f9cb33c0 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -80,3 +80,13 @@ eclipse {
     defaultOutputDir = new File(file('build'), 'eclipse')
   }
 }
+
+task copyEclipseSettings(type: Copy) {
+  from project.file('src/main/resources/eclipse.settings')
+  into '.settings'
+}
+// otherwise .settings is not nuked entirely
+tasks.cleanEclipse {
+  delete '.settings'
+}
+tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)

From 3cede749f954b42ec98b9ae32e5263d69956fdec Mon Sep 17 00:00:00 2001
From: Ben Tse <ben.tse@gmail.com>
Date: Thu, 3 Dec 2015 23:53:48 -0500
Subject: [PATCH 36/41] fixed minor typo

---
 docs/reference/mapping/params/doc-values.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/mapping/params/doc-values.asciidoc b/docs/reference/mapping/params/doc-values.asciidoc
index ea8dfd08bb7..a0108b899b9 100644
--- a/docs/reference/mapping/params/doc-values.asciidoc
+++ b/docs/reference/mapping/params/doc-values.asciidoc
@@ -9,7 +9,7 @@ of documents that contain the term.
 Sorting, aggregations, and access to field values in scripts requires a
 different data access pattern.  Instead of lookup up the term and finding
 documents, we need to be able to look up the document and find the terms that
-is has in a field.
+it has in a field.
 
 Doc values are the on-disk data structure, built at document index time, which
 makes this data access pattern possible. They store the same values as the

From 342665300b7b2f7eee16d6b2dae800294510bba9 Mon Sep 17 00:00:00 2001
From: Yannick Welsch <yannick@elastic.co>
Date: Fri, 4 Dec 2015 11:24:22 +0100
Subject: [PATCH 37/41] Add capabilities for serializing map-based cluster
 state diffs

- Supports ImmutableOpenIntMap in addition to java.util.Map and ImmutableOpenMap
- Map keys can be of any type (not only String)
- Map values do not have to implement the Diffable interface. In that case, a custom value serializer needs to be provided (see the sketch below).
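
For example, a plain java.util.Map with non-diffable String values could now
be diffed roughly as follows (an illustrative sketch, not code from this
patch; it assumes the usual StreamInput/StreamOutput imports, and the inline
value serializer is hypothetical):

    Map<String, String> before = new HashMap<>();
    before.put("node-1", "started");
    Map<String, String> after = new HashMap<>(before);
    after.put("node-2", "initializing");

    // non-diffable values require a custom value serializer
    DiffableUtils.NonDiffableValueSerializer<String, String> values =
            new DiffableUtils.NonDiffableValueSerializer<String, String>() {
                @Override
                public void write(String value, StreamOutput out) throws IOException {
                    out.writeString(value);
                }

                @Override
                public String read(StreamInput in, String key) throws IOException {
                    return in.readString();
                }
            };

    MapDiff<String, String, Map<String, String>> diff =
            DiffableUtils.diff(before, after,
                    DiffableUtils.getStringKeySerializer(), values);
    assert diff.apply(before).equals(after); // "node-2" arrives as an upsert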
---
 .../java/org/elasticsearch/cluster/Diff.java  |   2 +-
 .../elasticsearch/cluster/DiffableUtils.java  | 601 ++++++++++++++----
 .../cluster/routing/RoutingTable.java         |   4 +-
 .../cluster/serialization/DiffableTests.java  | 431 +++++++++++--
 .../zen/NodeJoinControllerTests.java          |   6 +-
 5 files changed, 876 insertions(+), 168 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/Diff.java b/core/src/main/java/org/elasticsearch/cluster/Diff.java
index 1a9fff246a9..76535a4b763 100644
--- a/core/src/main/java/org/elasticsearch/cluster/Diff.java
+++ b/core/src/main/java/org/elasticsearch/cluster/Diff.java
@@ -29,7 +29,7 @@ import java.io.IOException;
 public interface Diff<T> {
 
     /**
-     * Applies difference to the specified part and retunrs the resulted part
+     * Applies the difference to the specified part and returns the resulting part
      */
     T apply(T part);
 
diff --git a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
index 84e0021ee00..1488f059437 100644
--- a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
+++ b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
@@ -19,263 +19,630 @@
 
 package org.elasticsearch.cluster;
 
+import com.carrotsearch.hppc.cursors.IntCursor;
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 
+import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public final class DiffableUtils {
     private DiffableUtils() {
     }
 
+    /**
+     * Returns a map key serializer for String keys
+     */
+    public static KeySerializer<String> getStringKeySerializer() {
+        return StringKeySerializer.INSTANCE;
+    }
+
+    /**
+     * Returns a map key serializer for Integer keys. Encodes as Int.
+     */
+    public static KeySerializer<Integer> getIntKeySerializer() {
+        return IntKeySerializer.INSTANCE;
+    }
+
+    /**
+     * Returns a map key serializer for Integer keys. Encodes as VInt.
+     */
+    public static KeySerializer<Integer> getVIntKeySerializer() {
+        return VIntKeySerializer.INSTANCE;
+    }
+
     /**
      * Calculates diff between two ImmutableOpenMaps of Diffable objects
      */
-    public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> diff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
+    public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer) {
         assert after != null && before != null;
-        return new ImmutableOpenMapDiff<>(before, after);
+        return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
+    }
+
+    /**
+     * Calculates diff between two ImmutableOpenMaps of non-diffable objects
+     */
+    public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
+        assert after != null && before != null;
+        return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
+    }
+
+    /**
+     * Calculates diff between two ImmutableOpenIntMaps of Diffable objects
+     */
+    public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer) {
+        assert after != null && before != null;
+        return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
+    }
+
+    /**
+     * Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
+     */
+    public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
+        assert after != null && before != null;
+        return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
     }
 
     /**
      * Calculates diff between two Maps of Diffable objects.
      */
-    public static <T extends Diffable<T>> Diff<Map<String, T>> diff(Map<String, T> before, Map<String, T> after) {
+    public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer) {
         assert after != null && before != null;
-        return new JdkMapDiff<>(before, after);
+        return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
+    }
+
+    /**
+     * Calculates diff between two Maps of non-diffable objects
+     */
+    public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
+        assert after != null && before != null;
+        return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
     }
 
     /**
      * Loads an object that represents difference between two ImmutableOpenMaps
      */
-    public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
-        return new ImmutableOpenMapDiff<>(in, keyedReader);
-    }
-
-    /**
-     * Loads an object that represents difference between two Maps.
-     */
-    public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
-        return new JdkMapDiff<>(in, keyedReader);
+    public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
+        return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer);
     }
 
     /**
      * Loads an object that represents difference between two ImmutableOpenMaps
      */
-    public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException {
-        return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto));
+    public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
+        return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer);
     }
 
     /**
-     * Loads an object that represents difference between two Maps.
+     * Loads an object that represents difference between two Maps of Diffable objects
      */
-    public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, T proto) throws IOException {
-        return new JdkMapDiff<>(in, new PrototypeReader<>(proto));
+    public static <K, T> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
+        return new JdkMapDiff<>(in, keySerializer, valueSerializer);
     }
 
     /**
-     * A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's
-     * used in custom metadata deserialization.
+     * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
      */
-    public interface KeyedReader<T> {
-
-        /**
-         * reads an object of the type T from the stream input
-         */
-        T readFrom(StreamInput in, String key) throws IOException;
-
-        /**
-         * reads an object that respresents differences between two objects with the type T from the stream input
-         */
-        Diff<T> readDiffFrom(StreamInput in, String key) throws IOException;
+    public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
+        return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
     }
 
     /**
-     * Implementation of the KeyedReader that is using a prototype object for reading operations
-     *
-     * Note: this implementation is ignoring the key.
+     * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
      */
-    public static class PrototypeReader<T extends Diffable<T>> implements KeyedReader<T> {
-        private T proto;
-
-        public PrototypeReader(T proto) {
-            this.proto = proto;
-        }
-
-        @Override
-        public T readFrom(StreamInput in, String key) throws IOException {
-            return proto.readFrom(in);
-        }
-
-        @Override
-        public Diff<T> readDiffFrom(StreamInput in, String key) throws IOException {
-            return proto.readDiffFrom(in);
-        }
+    public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
+        return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
     }
 
     /**
-     * Represents differences between two Maps of Diffable objects.
+     * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
+     */
+    public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
+        return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
+    }
+
+    /**
+     * Represents differences between two Maps of (possibly diffable) objects.
      *
      * @param <T> the diffable object
      */
-    private static class JdkMapDiff<T extends Diffable<T>> extends MapDiff<T, Map<String, T>> {
+    private static class JdkMapDiff<K, T> extends MapDiff<K, T, Map<K, T>> {
 
-        protected JdkMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
-            super(in, reader);
+        protected JdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
+            super(in, keySerializer, valueSerializer);
         }
 
-        public JdkMapDiff(Map<String, T> before, Map<String, T> after) {
+        public JdkMapDiff(Map<K, T> before, Map<K, T> after,
+                          KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
+            super(keySerializer, valueSerializer);
             assert after != null && before != null;
-            for (String key : before.keySet()) {
+
+            for (K key : before.keySet()) {
                 if (!after.containsKey(key)) {
                     deletes.add(key);
                 }
             }
-            for (Map.Entry<String, T> partIter : after.entrySet()) {
+
+            for (Map.Entry<K, T> partIter : after.entrySet()) {
                 T beforePart = before.get(partIter.getKey());
                 if (beforePart == null) {
-                    adds.put(partIter.getKey(), partIter.getValue());
+                    upserts.put(partIter.getKey(), partIter.getValue());
                 } else if (partIter.getValue().equals(beforePart) == false) {
-                    diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart));
+                    if (valueSerializer.supportsDiffableValues()) {
+                        diffs.put(partIter.getKey(), valueSerializer.diff(partIter.getValue(), beforePart));
+                    } else {
+                        upserts.put(partIter.getKey(), partIter.getValue());
+                    }
                 }
             }
         }
 
         @Override
-        public Map<String, T> apply(Map<String, T> map) {
-            Map<String, T> builder = new HashMap<>();
+        public Map<K, T> apply(Map<K, T> map) {
+            Map<K, T> builder = new HashMap<>();
             builder.putAll(map);
 
-            for (String part : deletes) {
+            for (K part : deletes) {
                 builder.remove(part);
             }
 
-            for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
+            for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
                 builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
             }
 
-            for (Map.Entry<String, T> additon : adds.entrySet()) {
-                builder.put(additon.getKey(), additon.getValue());
+            for (Map.Entry<K, T> upsert : upserts.entrySet()) {
+                builder.put(upsert.getKey(), upsert.getValue());
             }
             return builder;
         }
     }
 
     /**
-     * Represents differences between two ImmutableOpenMap of diffable objects
+     * Represents differences between two ImmutableOpenMaps of (possibly diffable) objects
      *
-     * @param <T> the diffable object
+     * @param <T> the object type
      */
-    private static class ImmutableOpenMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableOpenMap<String, T>> {
+    private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
 
-        protected ImmutableOpenMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
-            super(in, reader);
+        protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
+            super(in, keySerializer, valueSerializer);
         }
 
-        public ImmutableOpenMapDiff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
+        public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
+                                    KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
+            super(keySerializer, valueSerializer);
             assert after != null && before != null;
-            for (ObjectCursor<String> key : before.keys()) {
+
+            for (ObjectCursor<K> key : before.keys()) {
                 if (!after.containsKey(key.value)) {
                     deletes.add(key.value);
                 }
             }
-            for (ObjectObjectCursor<String, T> partIter : after) {
+
+            for (ObjectObjectCursor<K, T> partIter : after) {
                 T beforePart = before.get(partIter.key);
                 if (beforePart == null) {
-                    adds.put(partIter.key, partIter.value);
+                    upserts.put(partIter.key, partIter.value);
                 } else if (partIter.value.equals(beforePart) == false) {
-                    diffs.put(partIter.key, partIter.value.diff(beforePart));
+                    if (valueSerializer.supportsDiffableValues()) {
+                        diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
+                    } else {
+                        upserts.put(partIter.key, partIter.value);
+                    }
                 }
             }
         }
 
         @Override
-        public ImmutableOpenMap<String, T> apply(ImmutableOpenMap<String, T> map) {
-            ImmutableOpenMap.Builder<String, T> builder = ImmutableOpenMap.builder();
+        public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
+            ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
             builder.putAll(map);
 
-            for (String part : deletes) {
+            for (K part : deletes) {
                 builder.remove(part);
             }
 
-            for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
+            for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
                 builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
             }
 
-            for (Map.Entry<String, T> additon : adds.entrySet()) {
-                builder.put(additon.getKey(), additon.getValue());
+            for (Map.Entry<K, T> upsert : upserts.entrySet()) {
+                builder.put(upsert.getKey(), upsert.getValue());
             }
             return builder.build();
         }
     }
 
     /**
-     * Represents differences between two maps of diffable objects
+     * Represents differences between two ImmutableOpenIntMaps of (possibly diffable) objects
      *
-     * This class is used as base class for different map implementations
-     *
-     * @param <T> the diffable object
+     * @param <T> the object type
      */
-    private static abstract class MapDiff<T extends Diffable<T>, M> implements Diff<M> {
+    private static class ImmutableOpenIntMapDiff<T> extends MapDiff<Integer, T, ImmutableOpenIntMap<T>> {
 
-        protected final List<String> deletes;
-        protected final Map<String, Diff<T>> diffs;
-        protected final Map<String, T> adds;
-
-        protected MapDiff() {
-            deletes = new ArrayList<>();
-            diffs = new HashMap<>();
-            adds = new HashMap<>();
+        protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
+            super(in, keySerializer, valueSerializer);
         }
 
-        protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
+        public ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
+                                       KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
+            super(keySerializer, valueSerializer);
+            assert after != null && before != null;
+
+            for (IntCursor key : before.keys()) {
+                if (!after.containsKey(key.value)) {
+                    deletes.add(key.value);
+                }
+            }
+
+            for (IntObjectCursor<T> partIter : after) {
+                T beforePart = before.get(partIter.key);
+                if (beforePart == null) {
+                    upserts.put(partIter.key, partIter.value);
+                } else if (partIter.value.equals(beforePart) == false) {
+                    if (valueSerializer.supportsDiffableValues()) {
+                        diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
+                    } else {
+                        upserts.put(partIter.key, partIter.value);
+                    }
+                }
+            }
+        }
+
+        @Override
+        public ImmutableOpenIntMap<T> apply(ImmutableOpenIntMap<T> map) {
+            ImmutableOpenIntMap.Builder<T> builder = ImmutableOpenIntMap.builder();
+            builder.putAll(map);
+
+            for (Integer part : deletes) {
+                builder.remove(part);
+            }
+
+            for (Map.Entry<Integer, Diff<T>> diff : diffs.entrySet()) {
+                builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
+            }
+
+            for (Map.Entry<Integer, T> upsert : upserts.entrySet()) {
+                builder.put(upsert.getKey(), upsert.getValue());
+            }
+            return builder.build();
+        }
+    }
+
+    /**
+     * Represents differences between two maps of objects and is used as base class for different map implementations.
+     *
+     * Implements serialization. How differences are applied is left to subclasses.
+     *
+     * @param <K> the type of map keys
+     * @param <T> the type of map values
+     * @param <M> the map implementation type
+     */
+    public static abstract class MapDiff<K, T, M> implements Diff<M> {
+
+        protected final List<K> deletes;
+        protected final Map<K, Diff<T>> diffs; // incremental updates
+        protected final Map<K, T> upserts; // additions or full updates
+        protected final KeySerializer<K> keySerializer;
+        protected final ValueSerializer<K, T> valueSerializer;
+
+        protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
+            this.keySerializer = keySerializer;
+            this.valueSerializer = valueSerializer;
             deletes = new ArrayList<>();
             diffs = new HashMap<>();
-            adds = new HashMap<>();
+            upserts = new HashMap<>();
+        }
+
+        protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
+            this.keySerializer = keySerializer;
+            this.valueSerializer = valueSerializer;
+            deletes = new ArrayList<>();
+            diffs = new HashMap<>();
+            upserts = new HashMap<>();
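+            // wire format: vInt delete count followed by the deleted keys, then
+            // vInt diff count followed by (key, diff) pairs, then vInt upsert
+            // count followed by (key, value) pairs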
             int deletesCount = in.readVInt();
             for (int i = 0; i < deletesCount; i++) {
-                deletes.add(in.readString());
+                deletes.add(keySerializer.readKey(in));
             }
-
             int diffsCount = in.readVInt();
             for (int i = 0; i < diffsCount; i++) {
-                String key = in.readString();
-                Diff<T> diff = reader.readDiffFrom(in, key);
+                K key = keySerializer.readKey(in);
+                Diff<T> diff = valueSerializer.readDiff(in, key);
                 diffs.put(key, diff);
             }
-
-            int addsCount = in.readVInt();
-            for (int i = 0; i < addsCount; i++) {
-                String key = in.readString();
-                T part = reader.readFrom(in, key);
-                adds.put(key, part);
+            int upsertsCount = in.readVInt();
+            for (int i = 0; i < upsertsCount; i++) {
+                K key = keySerializer.readKey(in);
+                T newValue = valueSerializer.read(in, key);
+                upserts.put(key, newValue);
             }
         }
+
+        /**
+         * The keys that, when this diff is applied to a map, should be removed from the map.
+         *
+         * @return the list of keys that are deleted
+         */
+        public List<K> getDeletes() {
+            return deletes;
+        }
+
+        /**
+         * Map entries that, when this diff is applied to a map, should be
+         * incrementally updated. The incremental update is represented using
+         * the {@link Diff} interface.
+         *
+         * @return the map entries that are incrementally updated
+         */
+        public Map<K, Diff<T>> getDiffs() {
+            return diffs;
+        }
+
+        /**
+         * Map entries that, when this diff is applied to a map, should be
+         * added to the map or fully replace the previous value.
+         *
+         * @return the map entries that are additions or full updates
+         */
+        public Map<K, T> getUpserts() {
+            return upserts;
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeVInt(deletes.size());
-            for (String delete : deletes) {
-                out.writeString(delete);
+            for (K delete : deletes) {
+                keySerializer.writeKey(delete, out);
             }
-
             out.writeVInt(diffs.size());
-            for (Map.Entry<String, Diff<T>> entry : diffs.entrySet()) {
-                out.writeString(entry.getKey());
-                entry.getValue().writeTo(out);
+            for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
+                keySerializer.writeKey(entry.getKey(), out);
+                valueSerializer.writeDiff(entry.getValue(), out);
             }
-
-            out.writeVInt(adds.size());
-            for (Map.Entry<String, T> entry : adds.entrySet()) {
-                out.writeString(entry.getKey());
-                entry.getValue().writeTo(out);
+            out.writeVInt(upserts.size());
+            for (Map.Entry<K, T> entry : upserts.entrySet()) {
+                keySerializer.writeKey(entry.getKey(), out);
+                valueSerializer.write(entry.getValue(), out);
             }
         }
     }
+
+    /**
+     * Provides read and write operations to serialize keys of a map
+     * @param <K> type of key
+     */
+    public interface KeySerializer<K> {
+        void writeKey(K key, StreamOutput out) throws IOException;
+        K readKey(StreamInput in) throws IOException;
+    }
+
+    /**
+     * Serializes String keys of a map
+     */
+    private static final class StringKeySerializer implements KeySerializer<String> {
+        private static final StringKeySerializer INSTANCE = new StringKeySerializer();
+
+        @Override
+        public void writeKey(String key, StreamOutput out) throws IOException {
+            out.writeString(key);
+        }
+
+        @Override
+        public String readKey(StreamInput in) throws IOException {
+            return in.readString();
+        }
+    }
+
+    /**
+     * Serializes Integer keys of a map as an Int
+     */
+    private static final class IntKeySerializer implements KeySerializer<Integer> {
+        public static final IntKeySerializer INSTANCE = new IntKeySerializer();
+
+        @Override
+        public void writeKey(Integer key, StreamOutput out) throws IOException {
+            out.writeInt(key);
+        }
+
+        @Override
+        public Integer readKey(StreamInput in) throws IOException {
+            return in.readInt();
+        }
+    }
+
+    /**
+     * Serializes Integer keys of a map as a VInt. Requires keys to be non-negative.
+     */
+    private static final class VIntKeySerializer implements KeySerializer<Integer> {
+        public static final VIntKeySerializer INSTANCE = new VIntKeySerializer();
+
+        @Override
+        public void writeKey(Integer key, StreamOutput out) throws IOException {
+            if (key < 0) {
+                throw new IllegalArgumentException("Map key [" + key + "] must be positive");
+            }
+            out.writeVInt(key);
+        }
+
+        @Override
+        public Integer readKey(StreamInput in) throws IOException {
+            return in.readVInt();
+        }
+    }
+
+    /**
+     * Provides read and write operations to serialize map values.
+     * Reading of values can be made dependent on map key.
+     *
+     * Also provides operations to distinguish whether map values are diffable.
+     *
+     * Should not be directly implemented, instead implement either
+     * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
+     *
+     * @param <K> key type of map
+     * @param <V> value type of map
+     */
+    public interface ValueSerializer<K, V> {
+
+        /**
+         * Writes value to stream
+         */
+        void write(V value, StreamOutput out) throws IOException;
+
+        /**
+         * Reads value from stream. Reading operation can be made dependent on map key.
+         */
+        V read(StreamInput in, K key) throws IOException;
+
+        /**
+         * Whether this serializer supports diffable values
+         */
+        boolean supportsDiffableValues();
+
+        /**
+         * Computes diff if this serializer supports diffable values
+         */
+        Diff<V> diff(V value, V beforePart);
+
+        /**
+         * Writes value as diff to stream if this serializer supports diffable values
+         */
+        void writeDiff(Diff<V> value, StreamOutput out) throws IOException;
+
+        /**
+         * Reads value as diff from stream if this serializer supports diffable values.
+         * Reading operation can be made dependent on map key.
+         */
+        Diff<V> readDiff(StreamInput in, K key) throws IOException;
+    }
+
+    /**
+     * Serializer for Diffable map values. Subclasses need to implement the read and readDiff methods.
+     *
+     * @param <K> type of map keys
+     * @param <V> type of map values
+     */
+    public static abstract class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
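+        // write-only singleton: used when computing and serializing diffs, a
+        // path on which values are only ever written, never deserialized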
+        private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() {
+            @Override
+            public Object read(StreamInput in, Object key) throws IOException {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public Diff<Object> readDiff(StreamInput in, Object key) throws IOException {
+                throw new UnsupportedOperationException();
+            }
+        };
+
+        private static <K, V extends Diffable<V>> DiffableValueSerializer<K, V> getWriteOnlyInstance() {
+            return WRITE_ONLY_INSTANCE;
+        }
+
+        @Override
+        public boolean supportsDiffableValues() {
+            return true;
+        }
+
+        @Override
+        public Diff<V> diff(V value, V beforePart) {
+            return value.diff(beforePart);
+        }
+
+        @Override
+        public void write(V value, StreamOutput out) throws IOException {
+            value.writeTo(out);
+        }
+
+        public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
+            value.writeTo(out);
+        }
+    }
+
+    /**
+     * Serializer for non-diffable map values
+     *
+     * @param <K> type of map keys
+     * @param <V> type of map values
+     */
+    public static abstract class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
+        @Override
+        public boolean supportsDiffableValues() {
+            return false;
+        }
+
+        @Override
+        public Diff<V> diff(V value, V beforePart) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Diff<V> readDiff(StreamInput in, K key) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    /**
+     * Implementation of the ValueSerializer that uses a prototype object for reading operations
+     *
+     * Note: this implementation ignores the key.
+     */
+    public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
+        private final V proto;
+
+        public DiffablePrototypeValueReader(V proto) {
+            this.proto = proto;
+        }
+
+        @Override
+        public V read(StreamInput in, K key) throws IOException {
+            return proto.readFrom(in);
+        }
+
+        @Override
+        public Diff<V> readDiff(StreamInput in, K key) throws IOException {
+            return proto.readDiffFrom(in);
+        }
+    }
+
+    /**
+     * Implementation of ValueSerializer that serializes immutable sets of Strings
+     *
+     * @param <K> type of map key
+     */
+    public static class StringSetValueSerializer<K> extends NonDiffableValueSerializer<K, Set<String>> {
+        private static final StringSetValueSerializer INSTANCE = new StringSetValueSerializer();
+
+        public static <K> StringSetValueSerializer<K> getInstance() {
+            return INSTANCE;
+        }
+
+        @Override
+        public void write(Set<String> value, StreamOutput out) throws IOException {
+            out.writeStringArray(value.toArray(new String[value.size()]));
+        }
+
+        @Override
+        public Set<String> read(StreamInput in, K key) throws IOException {
+            return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(in.readStringArray())));
+        }
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 1a582e63ad2..c210539bc58 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -314,12 +314,12 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
 
         public RoutingTableDiff(RoutingTable before, RoutingTable after) {
             version = after.version;
-            indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting);
+            indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
         }
 
         public RoutingTableDiff(StreamInput in) throws IOException {
             version = in.readLong();
-            indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, IndexRoutingTable.PROTO);
+            indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
         }
 
         @Override
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
index 6c120e7ac6c..b3050392c98 100644
--- a/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
@@ -22,68 +22,383 @@ package org.elasticsearch.cluster.serialization;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
-import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
+import org.elasticsearch.cluster.DiffableUtils.MapDiff;
+import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.StreamableReader;
+import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
+import static java.util.Collections.emptyMap;
 import static java.util.Collections.unmodifiableMap;
 import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.CoreMatchers.nullValue;
 
 public class DiffableTests extends ESTestCase {
-    public void testJdkMapDiff() throws IOException {
-        Map<String, TestDiffable> before = new HashMap<>();
-        before.put("foo", new TestDiffable("1"));
-        before.put("bar", new TestDiffable("2"));
-        before.put("baz", new TestDiffable("3"));
-        before = unmodifiableMap(before);
-        Map<String, TestDiffable> map = new HashMap<>();
-        map.putAll(before);
-        map.remove("bar");
-        map.put("baz", new TestDiffable("4"));
-        map.put("new", new TestDiffable("5"));
-        Map<String, TestDiffable> after = unmodifiableMap(new HashMap<>(map));
-        Diff diff = DiffableUtils.diff(before, after);
-        BytesStreamOutput out = new BytesStreamOutput();
-        diff.writeTo(out);
-        StreamInput in = StreamInput.wrap(out.bytes());
-        Map<String, TestDiffable> serialized = DiffableUtils.readJdkMapDiff(in, TestDiffable.PROTO).apply(before);
-        assertThat(serialized.size(), equalTo(3));
-        assertThat(serialized.get("foo").value(), equalTo("1"));
-        assertThat(serialized.get("baz").value(), equalTo("4"));
-        assertThat(serialized.get("new").value(), equalTo("5"));
+
+    public void testJdkMapDiff() throws IOException {
+        new JdkMapDriver<TestDiffable>() {
+            @Override
+            protected boolean diffableValues() {
+                return true;
+            }
+
+            @Override
+            protected TestDiffable createValue(Integer key, boolean before) {
+                return new TestDiffable(String.valueOf(before ? key : key + 1));
+            }
+
+            @Override
+            protected MapDiff diff(Map<Integer, TestDiffable> before, Map<Integer, TestDiffable> after) {
+                return DiffableUtils.diff(before, after, keySerializer);
+            }
+
+            @Override
+            protected MapDiff readDiff(StreamInput in) throws IOException {
+                return useProtoForDiffableSerialization
+                        ? DiffableUtils.readJdkMapDiff(in, keySerializer, TestDiffable.PROTO)
+                        : DiffableUtils.readJdkMapDiff(in, keySerializer, diffableValueSerializer());
+            }
+        }.execute();
+
+        new JdkMapDriver<String>() {
+            @Override
+            protected boolean diffableValues() {
+                return false;
+            }
+
+            @Override
+            protected String createValue(Integer key, boolean before) {
+                return String.valueOf(before ? key : key + 1);
+            }
+
+            @Override
+            protected MapDiff diff(Map<Integer, String> before, Map<Integer, String> after) {
+                return DiffableUtils.diff(before, after, keySerializer, nonDiffableValueSerializer());
+            }
+
+            @Override
+            protected MapDiff readDiff(StreamInput in) throws IOException {
+                return DiffableUtils.readJdkMapDiff(in, keySerializer, nonDiffableValueSerializer());
+            }
+        }.execute();
     }
 
     public void testImmutableOpenMapDiff() throws IOException {
-        ImmutableOpenMap.Builder<String, TestDiffable> builder = ImmutableOpenMap.builder();
-        builder.put("foo", new TestDiffable("1"));
-        builder.put("bar", new TestDiffable("2"));
-        builder.put("baz", new TestDiffable("3"));
-        ImmutableOpenMap<String, TestDiffable> before = builder.build();
-        builder = ImmutableOpenMap.builder(before);
-        builder.remove("bar");
-        builder.put("baz", new TestDiffable("4"));
-        builder.put("new", new TestDiffable("5"));
-        ImmutableOpenMap<String, TestDiffable> after = builder.build();
-        Diff diff = DiffableUtils.diff(before, after);
-        BytesStreamOutput out = new BytesStreamOutput();
-        diff.writeTo(out);
-        StreamInput in = StreamInput.wrap(out.bytes());
-        ImmutableOpenMap<String, TestDiffable> serialized = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<TestDiffable>() {
+        new ImmutableOpenMapDriver<TestDiffable>() {
             @Override
-            public TestDiffable readFrom(StreamInput in, String key) throws IOException {
+            protected boolean diffableValues() {
+                return true;
+            }
+
+            @Override
+            protected TestDiffable createValue(Integer key, boolean before) {
+                return new TestDiffable(String.valueOf(before ? key : key + 1));
+            }
+
+            @Override
+            protected MapDiff diff(ImmutableOpenMap<Integer, TestDiffable> before, ImmutableOpenMap<Integer, TestDiffable> after) {
+                return DiffableUtils.diff(before, after, keySerializer);
+            }
+
+            @Override
+            protected MapDiff readDiff(StreamInput in) throws IOException {
+                return useProtoForDiffableSerialization
+                        ? DiffableUtils.readImmutableOpenMapDiff(in, keySerializer, TestDiffable.PROTO)
+                        : DiffableUtils.readImmutableOpenMapDiff(in, keySerializer, diffableValueSerializer());
+            }
+        }.execute();
+
+        new ImmutableOpenMapDriver<String>() {
+            @Override
+            protected boolean diffableValues() {
+                return false;
+            }
+
+            @Override
+            protected String createValue(Integer key, boolean before) {
+                return String.valueOf(before ? key : key + 1);
+            }
+
+            @Override
+            protected MapDiff diff(ImmutableOpenMap<Integer, String> before, ImmutableOpenMap<Integer, String> after) {
+                return DiffableUtils.diff(before, after, keySerializer, nonDiffableValueSerializer());
+            }
+
+            @Override
+            protected MapDiff readDiff(StreamInput in) throws IOException {
+                return DiffableUtils.readImmutableOpenMapDiff(in, keySerializer, nonDiffableValueSerializer());
+            }
+        }.execute();
+    }
+
+    public void testImmutableOpenIntMapDiff() throws IOException {
+        new ImmutableOpenIntMapDriver<TestDiffable>() {
+            @Override
+            protected boolean diffableValues() {
+                return true;
+            }
+
+            @Override
+            protected TestDiffable createValue(Integer key, boolean before) {
+                return new TestDiffable(String.valueOf(before ? key : key + 1));
+            }
+
+            @Override
+            protected MapDiff diff(ImmutableOpenIntMap<TestDiffable> before, ImmutableOpenIntMap<TestDiffable> after) {
+                return DiffableUtils.diff(before, after, keySerializer);
+            }
+
+            @Override
+            protected MapDiff readDiff(StreamInput in) throws IOException {
+                return useProtoForDiffableSerialization
+                        ? DiffableUtils.readImmutableOpenIntMapDiff(in, keySerializer, TestDiffable.PROTO)
+                        : DiffableUtils.readImmutableOpenIntMapDiff(in, keySerializer, diffableValueSerializer());
+            }
+        }.execute();
+
+        new ImmutableOpenIntMapDriver<String>() {
+            @Override
+            protected boolean diffableValues() {
+                return false;
+            }
+
+            @Override
+            protected String createValue(Integer key, boolean before) {
+                return String.valueOf(before ? key : key + 1);
+            }
+
+            @Override
+            protected MapDiff diff(ImmutableOpenIntMap<String> before, ImmutableOpenIntMap<String> after) {
+                return DiffableUtils.diff(before, after, keySerializer, nonDiffableValueSerializer());
+            }
+
+            @Override
+            protected MapDiff readDiff(StreamInput in) throws IOException {
+                return DiffableUtils.readImmutableOpenIntMapDiff(in, keySerializer, nonDiffableValueSerializer());
+            }
+        }.execute();
+    }
+
+    /**
+     * Class that abstracts over specific map implementation type and value kind (Diffable or not)
+     * @param <T> map type
+     * @param <V> value type
+     */
+    public abstract class MapDriver<T, V> {
+        protected final Set<Integer> keys = randomPositiveIntSet();
+        protected final Set<Integer> keysToRemove = new HashSet<>(randomSubsetOf(randomInt(keys.size()), keys.toArray(new Integer[keys.size()])));
+        protected final Set<Integer> keysThatAreNotRemoved = Sets.difference(keys, keysToRemove);
+        protected final Set<Integer> keysToOverride = new HashSet<>(randomSubsetOf(randomInt(keysThatAreNotRemoved.size()),
+                keysThatAreNotRemoved.toArray(new Integer[keysThatAreNotRemoved.size()])));
+        protected final Set<Integer> keysToAdd = Sets.difference(randomPositiveIntSet(), keys); // make sure keysToAdd does not contain elements in keys
+        protected final Set<Integer> keysUnchanged = Sets.difference(keysThatAreNotRemoved, keysToOverride);
+
+        protected final DiffableUtils.KeySerializer<Integer> keySerializer = randomBoolean()
+                ? DiffableUtils.getIntKeySerializer()
+                : DiffableUtils.getVIntKeySerializer();
+
+        protected final boolean useProtoForDiffableSerialization = randomBoolean();
+
+        private Set<Integer> randomPositiveIntSet() {
+            int maxSetSize = randomInt(6);
+            Set<Integer> result = new HashSet<>();
+            for (int i = 0; i < maxSetSize; i++) {
+                // due to duplicates, set size can be smaller than maxSetSize
+                result.add(randomIntBetween(0, 100));
+            }
+            return result;
+        }
+
+        /**
+         * whether we operate on {@link org.elasticsearch.cluster.Diffable} values
+         */
+        protected abstract boolean diffableValues();
+
+        /**
+         * function that determines the value in the "before" or "after" map based on the key
+         */
+        protected abstract V createValue(Integer key, boolean before);
+
+        /**
+         * creates the map implementation under test from a JDK-based map
+         */
+        protected abstract T createMap(Map<Integer, V> values);
+
+        /**
+         * calculates diff between two maps
+         */
+        protected abstract MapDiff<Integer, V, T> diff(T before, T after);
+
+        /**
+         * reads diff of maps from stream
+         */
+        protected abstract MapDiff<Integer, V, T> readDiff(StreamInput in) throws IOException;
+
+        /**
+         * gets the element for the given key from the given map
+         */
+        protected abstract V get(T map, Integer key);
+
+        /**
+         * returns size of given map
+         */
+        protected abstract int size(T map);
+
+        /**
+         * executes the actual test
+         */
+        public void execute() throws IOException {
+            logger.debug("Keys in 'before' map: {}", keys);
+            logger.debug("Keys to remove: {}", keysToRemove);
+            logger.debug("Keys to override: {}", keysToOverride);
+            logger.debug("Keys to add: {}", keysToAdd);
+
+            logger.debug("--> creating 'before' map");
+            Map<Integer, V> before = new HashMap<>();
+            for (Integer key : keys) {
+                before.put(key, createValue(key, true));
+            }
+            T beforeMap = createMap(before);
+
+            logger.debug("--> creating 'after' map");
+            Map<Integer, V> after = new HashMap<>();
+            after.putAll(before);
+            for (Integer key : keysToRemove) {
+                after.remove(key);
+            }
+            for (Integer key : keysToOverride) {
+                after.put(key, createValue(key, false));
+            }
+            for (Integer key : keysToAdd) {
+                after.put(key, createValue(key, false));
+            }
+            T afterMap = createMap(unmodifiableMap(after));
+
+            MapDiff<Integer, V, T> diffMap = diff(beforeMap, afterMap);
+
+            // check properties of diffMap
+            assertThat(new HashSet<>(diffMap.getDeletes()), equalTo(keysToRemove));
+            if (diffableValues()) {
+                assertThat(diffMap.getDiffs().keySet(), equalTo(keysToOverride));
+                for (Integer key : keysToOverride) {
+                    assertThat(diffMap.getDiffs().get(key).apply(get(beforeMap, key)), equalTo(get(afterMap, key)));
+                }
+                assertThat(diffMap.getUpserts().keySet(), equalTo(keysToAdd));
+                for (Integer key : keysToAdd) {
+                    assertThat(diffMap.getUpserts().get(key), equalTo(get(afterMap, key)));
+                }
+            } else {
+                assertThat(diffMap.getDiffs(), equalTo(emptyMap()));
+                Set<Integer> keysToAddAndOverride = Sets.union(keysToAdd, keysToOverride);
+                assertThat(diffMap.getUpserts().keySet(), equalTo(keysToAddAndOverride));
+                for (Integer key : keysToAddAndOverride) {
+                    assertThat(diffMap.getUpserts().get(key), equalTo(get(afterMap, key)));
+                }
+            }
+
+            if (randomBoolean()) {
+                logger.debug("--> serializing diff");
+                BytesStreamOutput out = new BytesStreamOutput();
+                diffMap.writeTo(out);
+                StreamInput in = StreamInput.wrap(out.bytes());
+                logger.debug("--> reading diff back");
+                diffMap = readDiff(in);
+            }
+            T appliedDiffMap = diffMap.apply(beforeMap);
+
+            // check properties of appliedDiffMap
+            assertThat(size(appliedDiffMap), equalTo(keys.size() - keysToRemove.size() + keysToAdd.size()));
+            for (Integer key : keysToRemove) {
+                assertThat(get(appliedDiffMap, key), nullValue());
+            }
+            for (Integer key : keysUnchanged) {
+                assertThat(get(appliedDiffMap, key), equalTo(get(beforeMap, key)));
+            }
+            for (Integer key : keysToOverride) {
+                assertThat(get(appliedDiffMap, key), not(equalTo(get(beforeMap, key))));
+                assertThat(get(appliedDiffMap, key), equalTo(get(afterMap, key)));
+            }
+            for (Integer key : keysToAdd) {
+                assertThat(get(appliedDiffMap, key), equalTo(get(afterMap, key)));
+            }
+        }
+    }
+
+    abstract class JdkMapDriver<V> extends MapDriver<Map<Integer, V>, V> {
+
+        @Override
+        protected Map<Integer, V> createMap(Map<Integer, V> values) {
+            return values;
+        }
+
+        @Override
+        protected V get(Map<Integer, V> map, Integer key) {
+            return map.get(key);
+        }
+
+        @Override
+        protected int size(Map<Integer, V> map) {
+            return map.size();
+        }
+    }
+
+    abstract class ImmutableOpenMapDriver<V> extends MapDriver<ImmutableOpenMap<Integer, V>, V> {
+
+        @Override
+        protected ImmutableOpenMap<Integer, V> createMap(Map<Integer, V> values) {
+            return ImmutableOpenMap.<Integer, V>builder().putAll(values).build();
+        }
+
+        @Override
+        protected V get(ImmutableOpenMap<Integer, V> map, Integer key) {
+            return map.get(key);
+        }
+
+        @Override
+        protected int size(ImmutableOpenMap<Integer, V> map) {
+            return map.size();
+        }
+    }
+
+    abstract class ImmutableOpenIntMapDriver<V> extends MapDriver<ImmutableOpenIntMap<V>, V> {
+
+        @Override
+        protected ImmutableOpenIntMap<V> createMap(Map<Integer, V> values) {
+            return ImmutableOpenIntMap.<V>builder().putAll(values).build();
+        }
+
+        @Override
+        protected V get(ImmutableOpenIntMap<V> map, Integer key) {
+            return map.get(key);
+        }
+
+        @Override
+        protected int size(ImmutableOpenIntMap<V> map) {
+            return map.size();
+        }
+    }
+
+    private static <K> DiffableUtils.DiffableValueSerializer<K, TestDiffable> diffableValueSerializer() {
+        return new DiffableUtils.DiffableValueSerializer<K, TestDiffable>() {
+            @Override
+            public TestDiffable read(StreamInput in, K key) throws IOException {
                 return new TestDiffable(in.readString());
             }
 
             @Override
-            public Diff<TestDiffable> readDiffFrom(StreamInput in, String key) throws IOException {
+            public Diff<TestDiffable> readDiff(StreamInput in, K key) throws IOException {
                 return AbstractDiffable.readDiffFrom(new StreamableReader<TestDiffable>() {
                     @Override
                     public TestDiffable readFrom(StreamInput in) throws IOException {
@@ -91,13 +406,23 @@ public class DiffableTests extends ESTestCase {
                     }
                 }, in);
             }
-        }).apply(before);
-        assertThat(serialized.size(), equalTo(3));
-        assertThat(serialized.get("foo").value(), equalTo("1"));
-        assertThat(serialized.get("baz").value(), equalTo("4"));
-        assertThat(serialized.get("new").value(), equalTo("5"));
-
+        };
     }
+
+    private static <K> DiffableUtils.NonDiffableValueSerializer<K, String> nonDiffableValueSerializer() {
+        return new DiffableUtils.NonDiffableValueSerializer<K, String>() {
+            @Override
+            public void write(String value, StreamOutput out) throws IOException {
+                out.writeString(value);
+            }
+
+            @Override
+            public String read(StreamInput in, K key) throws IOException {
+                return in.readString();
+            }
+        };
+    }
+
     public static class TestDiffable extends AbstractDiffable<TestDiffable> {
 
         public static final TestDiffable PROTO = new TestDiffable("");
@@ -121,6 +446,22 @@ public class DiffableTests extends ESTestCase {
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(value);
         }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+
+            TestDiffable that = (TestDiffable) o;
+
+            return value != null ? value.equals(that.value) : that.value == null;
+        }
+
+        @Override
+        public int hashCode() {
+            return value != null ? value.hashCode() : 0;
+        }
     }
 
 }
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
index 76e6317c1fc..ea590756a8b 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java
@@ -488,17 +488,17 @@ public class NodeJoinControllerTests extends ESTestCase {
 
         @Override
         public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards, boolean withReroute) {
-            return new RoutingAllocation.Result(false, clusterState.routingTable());
+            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
 
         @Override
         public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
-            return new RoutingAllocation.Result(false, clusterState.routingTable());
+            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
 
         @Override
         protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) {
-            return new RoutingAllocation.Result(false, clusterState.routingTable());
+            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
     }
 

From fef043a5bfca62deb8b9e20ace59c619983f0e1f Mon Sep 17 00:00:00 2001
From: Yannick Welsch <yannick@elastic.co>
Date: Fri, 4 Dec 2015 11:31:48 +0100
Subject: [PATCH 38/41] Persist currently started allocation IDs to index
 metadata

Closes #14964
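
With this change, the allocation IDs of all currently started copies of a
shard are persisted in the index metadata and kept in sync with the routing
table on every allocation change. As an illustration only (mirroring the new
test added below; settings(Version.CURRENT) is the helper from
ESAllocationTestCase, and putActiveAllocationIds/activeAllocationIds are the
accessors introduced by this commit):

    // Hedged sketch: record and query active allocation ids on IndexMetaData.
    IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(settings(Version.CURRENT))
            .numberOfShards(1).numberOfReplicas(1)
            .putActiveAllocationIds(0, Collections.singleton("some-allocation-id"))
            .build();
    // An empty set means no copy of that shard is currently started.
    Set<String> activeIds = indexMetaData.activeAllocationIds(0);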
---
 .../elasticsearch/cluster/ClusterState.java   |  22 +-
 .../cluster/metadata/IndexMetaData.java       | 228 +++++++++++++-----
 .../cluster/metadata/MetaData.java            |  19 +-
 .../routing/allocation/AllocationService.java |  97 ++++++--
 .../routing/allocation/RoutingAllocation.java |  20 +-
 .../allocation/ActiveAllocationIdTests.java   |  81 +++++++
 6 files changed, 371 insertions(+), 96 deletions(-)
 create mode 100644 core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java

diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
index c9246223f09..e20f21b4cec 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.cluster;
 
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -469,6 +469,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
                 }
                 builder.endArray();
 
+                builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
+                for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
+                    builder.startArray(String.valueOf(cursor.key));
+                    for (String allocationId : cursor.value) {
+                        builder.value(allocationId);
+                    }
+                    builder.endArray();
+                }
+                builder.endObject();
+
                 builder.endObject();
             }
             builder.endObject();
@@ -584,6 +594,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
 
         public Builder routingResult(RoutingAllocation.Result routingResult) {
             this.routingTable = routingResult.routingTable();
+            this.metaData = routingResult.metaData();
             return this;
         }
 
@@ -759,7 +770,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
             nodes = after.nodes.diff(before.nodes);
             metaData = after.metaData.diff(before.metaData);
             blocks = after.blocks.diff(before.blocks);
-            customs = DiffableUtils.diff(before.customs, after.customs);
+            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
         }
 
         public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
@@ -771,14 +782,15 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
             nodes = proto.nodes.readDiffFrom(in);
             metaData = proto.metaData.readDiffFrom(in);
             blocks = proto.blocks.readDiffFrom(in);
-            customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
+            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
+                    new DiffableUtils.DiffableValueSerializer<String, Custom>() {
                 @Override
-                public Custom readFrom(StreamInput in, String key) throws IOException {
+                public Custom read(StreamInput in, String key) throws IOException {
                     return lookupPrototypeSafe(key).readFrom(in);
                 }
 
                 @Override
-                public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
+                public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                     return lookupPrototypeSafe(key).readDiffFrom(in);
                 }
             });
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index 42e9a4b2244..669d71477ca 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.cluster.metadata;
 
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.Version;
@@ -30,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -46,10 +48,13 @@ import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
 import java.text.ParseException;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Set;
 
 import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
 import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@@ -168,6 +173,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
     public static final String INDEX_UUID_NA_VALUE = "_na_";
 
+    public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
+
     private final int numberOfShards;
     private final int numberOfReplicas;
 
@@ -184,6 +191,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
 
     private final ImmutableOpenMap<String, Custom> customs;
 
+    private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
+
     private transient final int totalNumberOfShards;
 
     private final DiscoveryNodeFilters requireFilters;
@@ -194,65 +203,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
     private final Version indexUpgradedVersion;
     private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
 
-    private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
-        Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
-        if (maybeNumberOfShards == null) {
-            throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
-        }
-        int numberOfShards = maybeNumberOfShards;
-        if (numberOfShards <= 0) {
-            throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
-        }
+    private IndexMetaData(String index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings,
+                          ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
+                          ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
+                          DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
+                          Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion) {
 
-        Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
-        if (maybeNumberOfReplicas == null) {
-            throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
-        }
-        int numberOfReplicas = maybeNumberOfReplicas;
-        if (numberOfReplicas < 0) {
-            throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
-        }
         this.index = index;
         this.version = version;
         this.state = state;
-        this.settings = settings;
-        this.mappings = mappings;
-        this.customs = customs;
         this.numberOfShards = numberOfShards;
         this.numberOfReplicas = numberOfReplicas;
         this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
+        this.settings = settings;
+        this.mappings = mappings;
+        this.customs = customs;
         this.aliases = aliases;
-
-        Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
-        if (requireMap.isEmpty()) {
-            requireFilters = null;
-        } else {
-            requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
-        }
-        Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
-        if (includeMap.isEmpty()) {
-            includeFilters = null;
-        } else {
-            includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
-        }
-        Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
-        if (excludeMap.isEmpty()) {
-            excludeFilters = null;
-        } else {
-            excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
-        }
-        indexCreatedVersion = Version.indexCreated(settings);
-        indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
-        String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
-        if (stringLuceneVersion != null) {
-            try {
-                this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
-            } catch (ParseException ex) {
-                throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
-            }
-        } else {
-            this.minimumCompatibleLuceneVersion = null;
-        }
+        this.activeAllocationIds = activeAllocationIds;
+        this.requireFilters = requireFilters;
+        this.includeFilters = includeFilters;
+        this.excludeFilters = excludeFilters;
+        this.indexCreatedVersion = indexCreatedVersion;
+        this.indexUpgradedVersion = indexUpgradedVersion;
+        this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
     }
 
     public String getIndex() {
@@ -364,6 +337,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         return (T) customs.get(type);
     }
 
+    public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
+        return activeAllocationIds;
+    }
+
+    public Set<String> activeAllocationIds(int shardId) {
+        assert shardId >= 0 && shardId < numberOfShards;
+        return activeAllocationIds.get(shardId);
+    }
+
     @Nullable
     public DiscoveryNodeFilters requireFilters() {
         return requireFilters;
@@ -408,6 +390,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         if (!customs.equals(that.customs)) {
             return false;
         }
+        if (!activeAllocationIds.equals(that.activeAllocationIds)) {
+            return false;
+        }
         return true;
     }
 
@@ -418,6 +403,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         result = 31 * result + aliases.hashCode();
         result = 31 * result + settings.hashCode();
         result = 31 * result + mappings.hashCode();
+        result = 31 * result + activeAllocationIds.hashCode();
         return result;
     }
 
@@ -450,16 +436,19 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         private final Settings settings;
         private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
         private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
-        private Diff<ImmutableOpenMap<String, Custom>> customs;
+        private final Diff<ImmutableOpenMap<String, Custom>> customs;
+        private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
 
         public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
             index = after.index;
             version = after.version;
             state = after.state;
             settings = after.settings;
-            mappings = DiffableUtils.diff(before.mappings, after.mappings);
-            aliases = DiffableUtils.diff(before.aliases, after.aliases);
-            customs = DiffableUtils.diff(before.customs, after.customs);
+            mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
+            aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
+            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
+            activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
+                    DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
         }
 
         public IndexMetaDataDiff(StreamInput in) throws IOException {
@@ -467,19 +456,22 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             version = in.readLong();
             state = State.fromId(in.readByte());
             settings = Settings.readSettingsFromStream(in);
-            mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO);
-            aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO);
-            customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader<Custom>() {
+            mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
+            aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
+            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
+                    new DiffableUtils.DiffableValueSerializer<String, Custom>() {
                 @Override
-                public Custom readFrom(StreamInput in, String key) throws IOException {
+                public Custom read(StreamInput in, String key) throws IOException {
                     return lookupPrototypeSafe(key).readFrom(in);
                 }
 
                 @Override
-                public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
+                public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                     return lookupPrototypeSafe(key).readDiffFrom(in);
                 }
             });
+            activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
+                    DiffableUtils.StringSetValueSerializer.getInstance());
         }
 
         @Override
@@ -491,6 +483,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             mappings.writeTo(out);
             aliases.writeTo(out);
             customs.writeTo(out);
+            activeAllocationIds.writeTo(out);
         }
 
         @Override
@@ -502,6 +495,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             builder.mappings.putAll(mappings.apply(part.mappings));
             builder.aliases.putAll(aliases.apply(part.aliases));
             builder.customs.putAll(customs.apply(part.customs));
+            builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
             return builder.build();
         }
     }
@@ -528,6 +522,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
             builder.putCustom(type, customIndexMetaData);
         }
+        int activeAllocationIdsSize = in.readVInt();
+        for (int i = 0; i < activeAllocationIdsSize; i++) {
+            int key = in.readVInt();
+            Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
+            builder.putActiveAllocationIds(key, allocationIds);
+        }
         return builder.build();
     }
 
@@ -550,6 +550,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             out.writeString(cursor.key);
             cursor.value.writeTo(out);
         }
+        out.writeVInt(activeAllocationIds.size());
+        for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
+            out.writeVInt(cursor.key);
+            DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
+        }
     }
 
     public static Builder builder(String index) {
@@ -569,12 +574,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
         private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
         private final ImmutableOpenMap.Builder<String, Custom> customs;
+        private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
 
         public Builder(String index) {
             this.index = index;
             this.mappings = ImmutableOpenMap.builder();
             this.aliases = ImmutableOpenMap.builder();
             this.customs = ImmutableOpenMap.builder();
+            this.activeAllocationIds = ImmutableOpenIntMap.builder();
         }
 
         public Builder(IndexMetaData indexMetaData) {
@@ -585,6 +592,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
             this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
             this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
+            this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
         }
 
         public String index() {
@@ -693,6 +701,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             return this.customs.get(type);
         }
 
+        public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
+            activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
+            return this;
+        }
+
+        public Set<String> getActiveAllocationIds(int shardId) {
+            return activeAllocationIds.get(shardId);
+        }
+
         public long version() {
             return this.version;
         }
@@ -714,7 +731,72 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                 }
             }
 
-            return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
+            Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
+            if (maybeNumberOfShards == null) {
+                throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
+            }
+            int numberOfShards = maybeNumberOfShards;
+            if (numberOfShards <= 0) {
+                throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
+            }
+
+            Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
+            if (maybeNumberOfReplicas == null) {
+                throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
+            }
+            int numberOfReplicas = maybeNumberOfReplicas;
+            if (numberOfReplicas < 0) {
+                throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
+            }
+
+            // fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
+            ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
+            for (int i = 0; i < numberOfShards; i++) {
+                if (activeAllocationIds.containsKey(i)) {
+                    filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
+                } else {
+                    filledActiveAllocationIds.put(i, Collections.emptySet());
+                }
+            }
+
+            Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
+            final DiscoveryNodeFilters requireFilters;
+            if (requireMap.isEmpty()) {
+                requireFilters = null;
+            } else {
+                requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
+            }
+            Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
+            final DiscoveryNodeFilters includeFilters;
+            if (includeMap.isEmpty()) {
+                includeFilters = null;
+            } else {
+                includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
+            }
+            Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
+            final DiscoveryNodeFilters excludeFilters;
+            if (excludeMap.isEmpty()) {
+                excludeFilters = null;
+            } else {
+                excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
+            }
+            Version indexCreatedVersion = Version.indexCreated(settings);
+            Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
+            String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
+            final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
+            if (stringLuceneVersion != null) {
+                try {
+                    minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
+                } catch (ParseException ex) {
+                    throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
+                }
+            } else {
+                minimumCompatibleLuceneVersion = null;
+            }
+
+            return new IndexMetaData(index, version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
+                    tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
+                    indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
         }
 
         public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
@@ -757,6 +839,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
             }
             builder.endObject();
 
+            builder.startObject(KEY_ACTIVE_ALLOCATIONS);
+            for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
+                builder.startArray(String.valueOf(cursor.key));
+                for (String allocationId : cursor.value) {
+                    builder.value(allocationId);
+                }
+                builder.endArray();
+            }
+            builder.endObject();
 
             builder.endObject();
         }
@@ -792,6 +883,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                         while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                             builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
                         }
+                    } else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
+                        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                            if (token == XContentParser.Token.FIELD_NAME) {
+                                currentFieldName = parser.currentName();
+                            } else if (token == XContentParser.Token.START_ARRAY) {
+                                String shardId = currentFieldName;
+                                Set<String> allocationIds = new HashSet<>();
+                                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                                    if (token == XContentParser.Token.VALUE_STRING) {
+                                        allocationIds.add(parser.text());
+                                    }
+                                }
+                                builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
+                            }
+                        }
                     } else {
                         // check if its a custom index metadata
                         Custom proto = lookupPrototype(currentFieldName);
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index d4e7baac790..657259747dd 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -27,7 +27,6 @@ import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.Diffable;
 import org.elasticsearch.cluster.DiffableUtils;
-import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
 import org.elasticsearch.cluster.InternalClusterInfoService;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -55,7 +54,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.store.IndexStoreConfig;
 import org.elasticsearch.indices.recovery.RecoverySettings;
-import org.elasticsearch.indices.store.IndicesStore;
 import org.elasticsearch.indices.ttl.IndicesTTLService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.warmer.IndexWarmersMetaData;
@@ -641,9 +639,9 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             version = after.version;
             transientSettings = after.transientSettings;
             persistentSettings = after.persistentSettings;
-            indices = DiffableUtils.diff(before.indices, after.indices);
-            templates = DiffableUtils.diff(before.templates, after.templates);
-            customs = DiffableUtils.diff(before.customs, after.customs);
+            indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
+            templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer());
+            customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
         }
 
         public MetaDataDiff(StreamInput in) throws IOException {
@@ -651,16 +649,17 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             version = in.readLong();
             transientSettings = Settings.readSettingsFromStream(in);
             persistentSettings = Settings.readSettingsFromStream(in);
-            indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO);
-            templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO);
-            customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
+            indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
+            templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
+            customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
+                    new DiffableUtils.DiffableValueSerializer<String, Custom>() {
                 @Override
-                public Custom readFrom(StreamInput in, String key) throws IOException {
+                public Custom read(StreamInput in, String key) throws IOException {
                     return lookupPrototypeSafe(key).readFrom(in);
                 }
 
                 @Override
-                public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
+                public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                     return lookupPrototypeSafe(key).readDiffFrom(in);
                 }
             });
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index f819d6fde0a..feafb76a5f2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -27,7 +27,14 @@ import org.elasticsearch.cluster.health.ClusterStateHealth;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.AllocationId;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -39,6 +46,8 @@ import org.elasticsearch.common.settings.Settings;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
+import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
@@ -79,24 +88,83 @@ public class AllocationService extends AbstractComponent {
         StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
         boolean changed = applyStartedShards(routingNodes, startedShards);
         if (!changed) {
-            return new RoutingAllocation.Result(false, clusterState.routingTable());
+            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
         shardsAllocators.applyStartedShards(allocation);
         if (withReroute) {
             reroute(allocation);
         }
-        RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
-        RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
+        final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
 
         String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
         logClusterHealthStateChange(
                 new ClusterStateHealth(clusterState),
-                new ClusterStateHealth(clusterState.metaData(), routingTable),
+                new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
                 "shards started [" + startedShardsAsString + "] ..."
         );
         return result;
     }
 
+    protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
+        return buildChangedResult(metaData, routingNodes, new RoutingExplanations());
+    }
+
+    protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) {
+        final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build();
+        MetaData newMetaData = updateMetaDataWithRoutingTable(metaData, routingTable);
+        return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations);
+    }
+
+    /**
+     * Updates the current {@link MetaData} based on the newly created {@link RoutingTable}.
+     *
+     * @param currentMetaData {@link MetaData} object from before the routing table was changed.
+     * @param newRoutingTable new {@link RoutingTable} created by the allocation change
+     * @return adapted {@link MetaData}, potentially the original one if no change was needed.
+     */
+    static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
+        // make sure index meta data and routing tables are in sync w.r.t active allocation ids
+        MetaData.Builder metaDataBuilder = null;
+        for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
+            final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
+            if (indexMetaData == null) {
+                throw new IllegalStateException("no metadata found for index [" + indexRoutingTable.index() + "]");
+            }
+            IndexMetaData.Builder indexMetaDataBuilder = null;
+            for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
+                Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
+                        .map(ShardRouting::allocationId)
+                        .filter(Objects::nonNull)
+                        .map(AllocationId::getId)
+                        .collect(Collectors.toSet());
+                // only update active allocation ids if there is an active shard
+                if (activeAllocationIds.isEmpty() == false) {
+                    // get currently stored allocation ids
+                    Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
+                    if (activeAllocationIds.equals(storedAllocationIds) == false) {
+                        if (indexMetaDataBuilder == null) {
+                            indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
+                        }
+
+                        indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
+                    }
+                }
+            }
+            if (indexMetaDataBuilder != null) {
+                if (metaDataBuilder == null) {
+                    metaDataBuilder = MetaData.builder(currentMetaData);
+                }
+                metaDataBuilder.put(indexMetaDataBuilder);
+            }
+        }
+        if (metaDataBuilder != null) {
+            return metaDataBuilder.build();
+        } else {
+            return currentMetaData;
+        }
+    }
+
     public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
         return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
     }
@@ -117,16 +185,15 @@ public class AllocationService extends AbstractComponent {
                     System.nanoTime(), System.currentTimeMillis()));
         }
         if (!changed) {
-            return new RoutingAllocation.Result(false, clusterState.routingTable());
+            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
         shardsAllocators.applyFailedShards(allocation);
         reroute(allocation);
-        RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
-        RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
+        final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
         String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
         logClusterHealthStateChange(
                 new ClusterStateHealth(clusterState),
-                new ClusterStateHealth(clusterState.getMetaData(), routingTable),
+                new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
                 "shards failed [" + failedShardsAsString + "] ..."
         );
         return result;
@@ -169,11 +236,10 @@ public class AllocationService extends AbstractComponent {
         // the assumption is that commands will move / act on shards (or fail through exceptions)
         // so, there will always be shard "movements", so no need to check on reroute
         reroute(allocation);
-        RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
-        RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable, explanations);
+        RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
         logClusterHealthStateChange(
                 new ClusterStateHealth(clusterState),
-                new ClusterStateHealth(clusterState.getMetaData(), routingTable),
+                new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
                 "reroute commands"
         );
         return result;
@@ -200,13 +266,12 @@ public class AllocationService extends AbstractComponent {
         RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
         allocation.debugDecision(debug);
         if (!reroute(allocation)) {
-            return new RoutingAllocation.Result(false, clusterState.routingTable());
+            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
-        RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
-        RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
+        RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
         logClusterHealthStateChange(
                 new ClusterStateHealth(clusterState),
-                new ClusterStateHealth(clusterState.getMetaData(), routingTable),
+                new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
                 reason
         );
         return result;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
index a8a546d946e..4e6ba0fb5ad 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -52,29 +52,33 @@ public class RoutingAllocation {
 
         private final RoutingTable routingTable;
 
+        private final MetaData metaData;
+
         private RoutingExplanations explanations = new RoutingExplanations();
 
         /**
          * Creates a new {@link RoutingAllocation.Result}
-         *
          * @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
          * @param routingTable the {@link RoutingTable} this Result references
+         * @param metaData the {@link MetaData} this Result references
          */
-        public Result(boolean changed, RoutingTable routingTable) {
+        public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
             this.changed = changed;
             this.routingTable = routingTable;
+            this.metaData = metaData;
         }
 
         /**
          * Creates a new {@link RoutingAllocation.Result}
-         * 
          * @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
          * @param routingTable the {@link RoutingTable} this Result references
+         * @param metaData the {@link MetaData} this Result references
          * @param explanations Explanation for the reroute actions
          */
-        public Result(boolean changed, RoutingTable routingTable, RoutingExplanations explanations) {
+        public Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
             this.changed = changed;
             this.routingTable = routingTable;
+            this.metaData = metaData;
             this.explanations = explanations;
         }
 
@@ -85,6 +89,14 @@ public class RoutingAllocation {
             return this.changed;
         }
 
+        /**
+         * Get the {@link MetaData} referenced by this result
+         * @return referenced {@link MetaData}
+         */
+        public MetaData metaData() {
+            return metaData;
+        }
+
         /**
          * Get the {@link RoutingTable} referenced by this result
          * @return referenced {@link RoutingTable}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
new file mode 100644
index 00000000000..7a7f4722e97
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
@@ -0,0 +1,81 @@
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.test.ESAllocationTestCase;
+
+import java.util.Arrays;
+import java.util.HashSet;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ActiveAllocationIdTests extends ESAllocationTestCase {
+
+    public void testActiveAllocationIdsUpdated() {
+        AllocationService allocation = createAllocationService();
+
+        logger.info("creating an index with 1 shard, 2 replicas");
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+                // add index metadata for an index that has no routing table entries, to check that its allocation ids are not removed
+                .put(IndexMetaData.builder("test-old").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)
+                        .putActiveAllocationIds(0, new HashSet<>(Arrays.asList("x", "y"))))
+                .build();
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("adding three nodes and performing rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(
+                newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+        RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
+        clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+
+        assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(0));
+        assertThat(clusterState.metaData().index("test-old").activeAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y"))));
+
+        logger.info("start primary shard");
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+
+        assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).size(), equalTo(1));
+        assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(1));
+        assertThat(clusterState.getRoutingTable().shardsWithState(STARTED).get(0).allocationId().getId(),
+                equalTo(clusterState.metaData().index("test").activeAllocationIds(0).iterator().next()));
+        assertThat(clusterState.metaData().index("test-old").activeAllocationIds(0), equalTo(new HashSet<>(Arrays.asList("x", "y"))));
+
+        logger.info("start replica shards");
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+
+        assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(3));
+
+        logger.info("remove a node");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .remove("node1"))
+                .build();
+        rerouteResult = allocation.reroute(clusterState, "reroute");
+        clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+
+        assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2));
+
+        logger.info("remove all remaining nodes");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .remove("node2").remove("node3"))
+                .build();
+        rerouteResult = allocation.reroute(clusterState, "reroute");
+        clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+
+        // active allocation ids should not be updated
+        assertThat(clusterState.getRoutingTable().shardsWithState(UNASSIGNED).size(), equalTo(3));
+        assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2));
+    }
+}

From fae494aa845bbb9e48797ac953ae5ec140c0e6eb Mon Sep 17 00:00:00 2001
From: javanna <cavannaluca@gmail.com>
Date: Thu, 26 Nov 2015 15:08:49 +0100
Subject: [PATCH 39/41] Java api: allow to specify ttl as a time value (either
 as a string or using TimeValue)

Previously, the ttl could be specified as a time value only via the REST layer. That is now possible via the Java API too, either as a string or as a proper TimeValue. The internal format in IndexRequest is now TimeValue, which is still converted to a long before the document is stored.

Closes #15047
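
As a usage sketch (hedged: the exact setter names on IndexRequestBuilder are
added by this patch but do not appear in the hunks below, so treat setTTL as
an assumption):

    // ttl given as a time value string, parsed internally into a TimeValue
    client.prepareIndex("index", "type", "1")
            .setSource("field", "value")
            .setTTL("1h")
            .get();

    // ttl given directly as a TimeValue
    client.prepareIndex("index", "type", "1")
            .setSource("field", "value")
            .setTTL(TimeValue.timeValueHours(1))
            .get();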
---
 .../action/bulk/BulkRequest.java              |  6 +--
 .../action/bulk/TransportShardBulkAction.java |  2 +-
 .../action/index/IndexRequest.java            | 52 ++++++++++++++-----
 .../action/index/IndexRequestBuilder.java     | 21 +++++++-
 .../action/index/TransportIndexAction.java    |  2 +-
 .../TransportReplicationAction.java           | 10 ++--
 .../action/update/UpdateHelper.java           | 31 +++++------
 .../action/update/UpdateRequestBuilder.java   | 23 +++++++-
 .../index/mapper/SourceToParse.java           | 15 ++++--
 .../rest/action/index/RestIndexAction.java    |  2 +-
 .../rest/action/update/RestUpdateAction.java  |  4 +-
 .../action/index/IndexRequestTests.java       | 46 ++++++++++++++--
 .../action/update/UpdateRequestTests.java     |  9 ++--
 .../index/mapper/ttl/TTLMappingTests.java     |  2 +-
 14 files changed, 162 insertions(+), 63 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
index 2eadbb5a6b2..02e0ea40d65 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -300,7 +300,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                 String parent = null;
                 String[] fields = defaultFields;
                 String timestamp = null;
-                Long ttl = null;
+                TimeValue ttl = null;
                 String opType = null;
                 long version = Versions.MATCH_ANY;
                 VersionType versionType = VersionType.INTERNAL;
@@ -333,9 +333,9 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                                 timestamp = parser.text();
                             } else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
                                 if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
-                                    ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName).millis();
+                                    ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName);
                                 } else {
-                                    ttl = parser.longValue();
+                                    ttl = new TimeValue(parser.longValue());
                                 }
                             } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
                                 opType = parser.text();
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 0f00b87b12a..a4565cf4cfc 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -335,7 +335,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
             indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index());
         }
 
-        return executeIndexRequestOnPrimary(request, indexRequest, indexShard);
+        return executeIndexRequestOnPrimary(indexRequest, indexShard);
     }
 
     private WriteResult<DeleteResponse> shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) {
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 88ade451404..501c003c249 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.*;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.VersionType;
@@ -136,7 +137,8 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     private String parent;
     @Nullable
     private String timestamp;
-    private long ttl = -1;
+    @Nullable
+    private TimeValue ttl;
 
     private BytesReference source;
 
@@ -229,6 +231,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         if (!versionType.validateVersionForWrites(version)) {
             validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
         }
+
+        if (ttl != null) {
+            if (ttl.millis() < 0) {
+                validationException = addValidationError("ttl must not be negative", validationException);
+            }
+        }
         return validationException;
     }
 
@@ -324,22 +332,33 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     }
 
     /**
-     * Sets the relative ttl value. It musts be &gt; 0 as it makes little sense otherwise. Setting it
-     * to <tt>null</tt> will reset to have no ttl.
+     * Sets the ttl value as a time value expression.
      */
-    public IndexRequest ttl(Long ttl) throws ElasticsearchGenerationException {
-        if (ttl == null) {
-            this.ttl = -1;
-            return this;
-        }
-        if (ttl <= 0) {
-            throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
-        }
+    public IndexRequest ttl(String ttl) {
+        this.ttl = TimeValue.parseTimeValue(ttl, null, "ttl");
+        return this;
+    }
+
+    /**
+     * Sets the ttl as a {@link TimeValue} instance.
+     */
+    public IndexRequest ttl(TimeValue ttl) {
         this.ttl = ttl;
         return this;
     }
 
-    public long ttl() {
+    /**
+     * Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
+     */
+    public IndexRequest ttl(long ttl) {
+        this.ttl = new TimeValue(ttl);
+        return this;
+    }
+
+    /**
+     * Returns the ttl as a {@link TimeValue}.
+     */
+    public TimeValue ttl() {
         return this.ttl;
     }
 
@@ -665,7 +684,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         routing = in.readOptionalString();
         parent = in.readOptionalString();
         timestamp = in.readOptionalString();
-        ttl = in.readLong();
+        ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null;
         source = in.readBytesReference();
 
         opType = OpType.fromId(in.readByte());
@@ -682,7 +701,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
         out.writeOptionalString(routing);
         out.writeOptionalString(parent);
         out.writeOptionalString(timestamp);
-        out.writeLong(ttl);
+        if (ttl == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            ttl.writeTo(out);
+        }
         out.writeBytesReference(source);
         out.writeByte(opType.id());
         out.writeBoolean(refresh);
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
index 2df8fec6d22..f7134d84843 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java
@@ -23,6 +23,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
@@ -254,9 +255,27 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
         return this;
     }
 
-    // Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise.
+    /**
+     * Sets the ttl value as a time value expression.
+     */
+    public IndexRequestBuilder setTTL(String ttl) {
+        request.ttl(ttl);
+        return this;
+    }
+
+    /**
+     * Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
+     */
     public IndexRequestBuilder setTTL(long ttl) {
         request.ttl(ttl);
         return this;
     }
+
+    /**
+     * Sets the ttl as a {@link TimeValue} instance.
+     */
+    public IndexRequestBuilder setTTL(TimeValue ttl) {
+        request.ttl(ttl);
+        return this;
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
index 63b82377d8a..3ffb5765e8c 100644
--- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java
@@ -166,7 +166,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
         IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
 
-        final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
+        final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(request, indexShard);
 
         final IndexResponse response = result.response;
         final Translog.Location location = result.location;
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 805778ccdeb..62802910cdb 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionWriteResponse;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.WriteConsistencyLevel;
-import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexRequest.OpType;
 import org.elasticsearch.action.index.IndexResponse;
@@ -1074,23 +1073,22 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 
     /** Utility method to create either an index or a create operation depending
      *  on the {@link OpType} of the request. */
-    private final Engine.Index prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
+    private Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
             return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
-
     }
 
     /** Execute the given {@link IndexRequest} on a primary shard, throwing a
      *  {@link RetryOnPrimaryException} if the operation needs to be re-tried. */
-    protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
-        Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
+    protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard) throws Throwable {
+        Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         final ShardId shardId = indexShard.shardId();
         if (update != null) {
             final String indexName = shardId.getIndex();
             mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
-            operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
+            operation = prepareIndexOperationOnPrimary(request, indexShard);
             update = operation.parsedDoc().dynamicMappingsUpdate();
             if (update != null) {
                 throw new RetryOnPrimaryException(shardId,
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
index 010142b0b4c..4bdcd43023f 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -88,7 +88,7 @@ public class UpdateHelper extends AbstractComponent {
                 throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
             }
             IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
-            Long ttl = indexRequest.ttl();
+            TimeValue ttl = indexRequest.ttl();
             if (request.scriptedUpsert() && request.script() != null) {
                 // Run the script to perform the create logic
                 IndexRequest upsert = request.upsertRequest();
@@ -99,7 +99,7 @@ public class UpdateHelper extends AbstractComponent {
                 ctx.put("_source", upsertDoc);
                 ctx = executeScript(request, ctx);
                 //Allow the script to set TTL using ctx._ttl
-                if (ttl < 0) {
+                if (ttl == null) {
                     ttl = getTTLFromScriptContext(ctx);
                 }
 
@@ -124,7 +124,7 @@ public class UpdateHelper extends AbstractComponent {
             indexRequest.index(request.index()).type(request.type()).id(request.id())
                     // it has to be a "create!"
                     .create(true)
-                    .ttl(ttl == null || ttl < 0 ? null : ttl)
+                    .ttl(ttl)
                     .refresh(request.refresh())
                     .routing(request.routing())
                     .parent(request.parent())
@@ -151,7 +151,7 @@ public class UpdateHelper extends AbstractComponent {
         Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
         String operation = null;
         String timestamp = null;
-        Long ttl = null;
+        TimeValue ttl = null;
         final Map<String, Object> updatedSourceAsMap;
         final XContentType updateSourceContentType = sourceAndContent.v1();
         String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
@@ -160,7 +160,7 @@ public class UpdateHelper extends AbstractComponent {
         if (request.script() == null && request.doc() != null) {
             IndexRequest indexRequest = request.doc();
             updatedSourceAsMap = sourceAndContent.v2();
-            if (indexRequest.ttl() > 0) {
+            if (indexRequest.ttl() != null) {
                 ttl = indexRequest.ttl();
             }
             timestamp = indexRequest.timestamp();
@@ -211,9 +211,9 @@ public class UpdateHelper extends AbstractComponent {
         // apply script to update the source
         // No TTL has been given in the update script so we keep previous TTL value if there is one
         if (ttl == null) {
-            ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
-            if (ttl != null) {
-                ttl = ttl - TimeValue.nsecToMSec(System.nanoTime() - getDateNS); // It is an approximation of exact TTL value, could be improved
+            Long ttlAsLong = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
+            if (ttlAsLong != null) {
+                ttl = new TimeValue(ttlAsLong - TimeValue.nsecToMSec(System.nanoTime() - getDateNS)); // It is an approximation of the exact TTL value, could be improved
             }
         }
 
@@ -256,17 +256,15 @@ public class UpdateHelper extends AbstractComponent {
         return ctx;
     }
 
-    private Long getTTLFromScriptContext(Map<String, Object> ctx) {
-        Long ttl = null;
+    private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
         Object fetchedTTL = ctx.get("_ttl");
         if (fetchedTTL != null) {
             if (fetchedTTL instanceof Number) {
-                ttl = ((Number) fetchedTTL).longValue();
-            } else {
-                ttl = TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl").millis();
+                return new TimeValue(((Number) fetchedTTL).longValue());
             }
+            return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
         }
-        return ttl;
+        return null;
     }
 
     /**
@@ -337,13 +335,10 @@ public class UpdateHelper extends AbstractComponent {
         }
     }
 
-    public static enum Operation {
-
+    public enum Operation {
         UPSERT,
         INDEX,
         DELETE,
         NONE
-
     }
-
 }
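
For context, a hedged sketch of an update script exercising the ctx._ttl
path refactored above (index, type and id are placeholders; the default
2.x script language, groovy, is assumed):

    // ctx._ttl may be a number of milliseconds or a time value string
    // such as "1h"; getTTLFromScriptContext now handles both cases
    client.prepareUpdate("index", "type", "1")
            .setScript(new Script("ctx._ttl = '1h'"))
            .get();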
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index 5b0a5bb9c5b..30b636f4efc 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -24,6 +24,7 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
@@ -325,7 +326,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
     }
 
     /**
-     * Set the new ttl of the document. Note that if detectNoop is true (the default)
+     * Set the new ttl of the document as a long. Note that if detectNoop is true (the default)
      * and the source of the document isn't changed then the ttl update won't take
      * effect.
      */
@@ -333,4 +334,24 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
         request.doc().ttl(ttl);
         return this;
     }
+
+    /**
+     * Set the new ttl of the document as a time value expression. Note that if detectNoop is true (the default)
+     * and the source of the document isn't changed then the ttl update won't take
+     * effect.
+     */
+    public UpdateRequestBuilder setTtl(String ttl) {
+        request.doc().ttl(ttl);
+        return this;
+    }
+
+    /**
+     * Set the new ttl of the document as a {@link TimeValue} instance. Note that if detectNoop is true (the default)
+     * and the source of the document isn't changed then the ttl update won't take
+     * effect.
+     */
+    public UpdateRequestBuilder setTtl(TimeValue ttl) {
+        request.doc().ttl(ttl);
+        return this;
+    }
 }
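
A short usage sketch of the new builder overloads (index, type and id
are placeholders); note the detectNoop caveat from the javadoc above:

    client.prepareUpdate("index", "type", "1")
            .setDoc("field", "changed") // the source must change for the ttl update to apply
            .setTtl(TimeValue.timeValueMinutes(30))
            .get();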
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
index e9d62c8e88d..f65072d489e 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.mapper;
 
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
 
 /**
@@ -159,16 +160,22 @@ public class SourceToParse {
         return this.ttl;
     }
 
+    public SourceToParse ttl(TimeValue ttl) {
+        if (ttl == null) {
+            this.ttl = -1;
+            return this;
+        }
+        this.ttl = ttl.millis();
+        return this;
+    }
+
     public SourceToParse ttl(long ttl) {
         this.ttl = ttl;
         return this;
     }
 
-    public static enum Origin {
-
+    public enum Origin {
         PRIMARY,
         REPLICA
-
     }
-
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java
index d0d0fe68a13..c7fc29155cc 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java
@@ -74,7 +74,7 @@ public class RestIndexAction extends BaseRestHandler {
         indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
         indexRequest.timestamp(request.param("timestamp"));
         if (request.hasParam("ttl")) {
-            indexRequest.ttl(request.paramAsTime("ttl", null).millis());
+            indexRequest.ttl(request.param("ttl"));
         }
         indexRequest.source(request.content());
         indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java
index a23780db62e..76e96ab7e7f 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java
@@ -105,7 +105,7 @@ public class RestUpdateAction extends BaseRestHandler {
                 upsertRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
                 upsertRequest.timestamp(request.param("timestamp"));
                 if (request.hasParam("ttl")) {
-                    upsertRequest.ttl(request.paramAsTime("ttl", null).millis());
+                    upsertRequest.ttl(request.param("ttl"));
                 }
                 upsertRequest.version(RestActions.parseVersion(request));
                 upsertRequest.versionType(VersionType.fromString(request.param("version_type"), upsertRequest.versionType()));
@@ -116,7 +116,7 @@ public class RestUpdateAction extends BaseRestHandler {
                 doc.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing
                 doc.timestamp(request.param("timestamp"));
                 if (request.hasParam("ttl")) {
-                    doc.ttl(request.paramAsTime("ttl", null).millis());
+                    doc.ttl(request.param("ttl"));
                 }
                 doc.version(RestActions.parseVersion(request));
                 doc.versionType(VersionType.fromString(request.param("version_type"), doc.versionType()));
diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
index 0739b5da02d..1d3a9e18757 100644
--- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
@@ -18,6 +18,8 @@
  */
 package org.elasticsearch.action.index;
 
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.test.ESTestCase;
 
@@ -25,10 +27,7 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.empty;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.*;
 
 /**
  */
@@ -67,4 +66,43 @@ public class IndexRequestTests extends ESTestCase {
         request.version(randomIntBetween(0, Integer.MAX_VALUE));
         assertThat(request.validate().validationErrors(), not(empty()));
     }
+
+    public void testSetTTLAsTimeValue() {
+        IndexRequest indexRequest = new IndexRequest();
+        TimeValue ttl = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl");
+        indexRequest.ttl(ttl);
+        assertThat(indexRequest.ttl(), equalTo(ttl));
+    }
+
+    public void testSetTTLAsString() {
+        IndexRequest indexRequest = new IndexRequest();
+        String ttlAsString = randomTimeValue();
+        TimeValue ttl = TimeValue.parseTimeValue(ttlAsString, null, "ttl");
+        indexRequest.ttl(ttlAsString);
+        assertThat(indexRequest.ttl(), equalTo(ttl));
+    }
+
+    public void testSetTTLAsLong() {
+        IndexRequest indexRequest = new IndexRequest();
+        String ttlAsString = randomTimeValue();
+        TimeValue ttl = TimeValue.parseTimeValue(ttlAsString, null, "ttl");
+        indexRequest.ttl(ttl.millis());
+        assertThat(indexRequest.ttl(), equalTo(ttl));
+    }
+
+    public void testValidateTTL() {
+        IndexRequest indexRequest = new IndexRequest("index", "type");
+        if (randomBoolean()) {
+            indexRequest.ttl(randomIntBetween(Integer.MIN_VALUE, -1));
+        } else {
+            if (randomBoolean()) {
+                indexRequest.ttl(new TimeValue(randomIntBetween(Integer.MIN_VALUE, -1)));
+            } else {
+                indexRequest.ttl(randomIntBetween(Integer.MIN_VALUE, -1) + "ms");
+            }
+        }
+        ActionRequestValidationException validate = indexRequest.validate();
+        assertThat(validate, notNullValue());
+        assertThat(validate.getMessage(), containsString("ttl must not be negative"));
+    }
 }
diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
index bc9bd2b9095..6cf7a3384ab 100644
--- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
@@ -23,6 +23,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.index.get.GetResult;
@@ -33,11 +34,7 @@ import org.elasticsearch.test.ESTestCase;
 import java.util.Map;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.*;
 
 public class UpdateRequestTests extends ESTestCase {
     public void testUpdateRequest() throws Exception {
@@ -127,7 +124,7 @@ public class UpdateRequestTests extends ESTestCase {
 
     // Related to issue 3256
     public void testUpdateRequestWithTTL() throws Exception {
-        long providedTTLValue = randomIntBetween(500, 1000);
+        TimeValue providedTTLValue = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl");
         Settings settings = settings(Version.CURRENT).build();
 
         UpdateHelper updateHelper = new UpdateHelper(settings, null);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
index 7802e866819..ff557f45589 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
@@ -295,7 +295,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase {
         request.process(MetaData.builder().build(), mappingMetaData, true, "test");
 
         // _ttl in a document never worked, so backcompat is ignoring the field
-        assertEquals(-1, request.ttl());
+        assertNull(request.ttl());
         assertNull(docMapper.parse("test", "type", "1", doc.bytes()).rootDoc().get("_ttl"));
     }
 

From 3f86adddbfac71f2d884fcdf5add8ce0e8f67e12 Mon Sep 17 00:00:00 2001
From: Adrien Grand <jpountz@gmail.com>
Date: Wed, 2 Dec 2015 10:47:04 +0100
Subject: [PATCH 40/41] Remove MergeMappingException.

Failures to merge a mapping can either come as a MergeMappingException if they
come from Mapper.merge or as an IllegalArgumentException if they come from
FieldTypeLookup.checkCompatibility. I think we should settle on one: this pull
request replaces all usage of MergeMappingException with
IllegalArgumentException.
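
For callers this mostly means catching a different exception type; a
hedged sketch (conflictingMapping stands in for a mapping JSON string
that conflicts with the type's existing mapping):

    try {
        client.admin().indices().preparePutMapping("index")
                .setType("type")
                .setSource(conflictingMapping) // placeholder
                .get();
    } catch (IllegalArgumentException e) {
        // conflicts now surface here rather than as MergeMappingException
    }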
---
 .../elasticsearch/ElasticsearchException.java |  2 +-
 .../metadata/MetaDataMappingService.java      |  3 +-
 .../index/mapper/FieldMapper.java             |  4 +-
 .../elasticsearch/index/mapper/Mapper.java    |  2 +-
 .../index/mapper/MapperService.java           |  2 +-
 .../index/mapper/MergeMappingException.java   | 62 -------------------
 .../mapper/core/CompletionFieldMapper.java    |  2 +-
 .../index/mapper/core/NumberFieldMapper.java  |  2 +-
 .../index/mapper/core/StringFieldMapper.java  |  3 +-
 .../mapper/core/TokenCountFieldMapper.java    |  3 +-
 .../mapper/geo/BaseGeoPointFieldMapper.java   |  3 +-
 .../mapper/geo/GeoPointFieldMapperLegacy.java |  3 +-
 .../index/mapper/geo/GeoShapeFieldMapper.java |  3 +-
 .../index/mapper/internal/AllFieldMapper.java |  3 +-
 .../index/mapper/internal/IdFieldMapper.java  |  3 +-
 .../mapper/internal/IndexFieldMapper.java     |  3 +-
 .../mapper/internal/ParentFieldMapper.java    |  3 +-
 .../mapper/internal/RoutingFieldMapper.java   |  3 +-
 .../mapper/internal/SourceFieldMapper.java    |  3 +-
 .../index/mapper/internal/TTLFieldMapper.java |  3 +-
 .../mapper/internal/TimestampFieldMapper.java |  3 +-
 .../mapper/internal/TypeFieldMapper.java      |  3 +-
 .../index/mapper/internal/UidFieldMapper.java |  3 +-
 .../mapper/internal/VersionFieldMapper.java   |  3 +-
 .../index/mapper/object/ObjectMapper.java     |  2 +-
 .../ExceptionSerializationTests.java          |  7 ---
 .../mapper/externalvalues/ExternalMapper.java |  3 +-
 .../ExternalMetadataMapper.java               |  3 +-
 .../index/mapper/ttl/TTLMappingTests.java     |  5 +-
 .../update/UpdateMappingOnClusterIT.java      |  5 +-
 .../mapper/update/UpdateMappingTests.java     | 10 +--
 .../mapping/UpdateMappingIntegrationIT.java   |  5 +-
 .../search/child/ChildQuerySearchIT.java      |  3 +-
 .../mapper/attachments/AttachmentMapper.java  |  2 +-
 .../index/mapper/size/SizeFieldMapper.java    |  3 +-
 35 files changed, 41 insertions(+), 134 deletions(-)
 delete mode 100644 core/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java

diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index 7ae10e0a5a6..18376aff88f 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -554,7 +554,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
         ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
         AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
-        MERGE_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.MergeMappingException.class, org.elasticsearch.index.mapper.MergeMappingException::new, 87),
+        // 87 used to be for MergeMappingException
         INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
         PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
         REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 160faca372c..957125703b6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidTypeNameException;
@@ -255,7 +254,7 @@ public class MetaDataMappingService extends AbstractComponent {
                         MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
                         // if we have conflicts, throw an exception
                         if (mergeResult.hasConflicts()) {
-                            throw new MergeMappingException(mergeResult.buildConflicts());
+                            throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
                         }
                     } else {
                         // TODO: can we find a better place for this validation?
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
index 45bef68ee00..552067ac337 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
@@ -360,7 +360,7 @@ public abstract class FieldMapper extends Mapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         if (!this.getClass().equals(mergeWith.getClass())) {
             String mergedType = mergeWith.getClass().getSimpleName();
             if (mergeWith instanceof FieldMapper) {
@@ -614,7 +614,7 @@ public abstract class FieldMapper extends Mapper {
         }
 
         // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
-        public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        public void merge(Mapper mergeWith, MergeResult mergeResult) {
             FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
 
             List<FieldMapper> newFieldMappers = null;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
index 9ca34e1c573..a8d0c0a706d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -154,5 +154,5 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
     /** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
     public abstract String name();
 
-    public abstract void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException;
+    public abstract void merge(Mapper mergeWith, MergeResult mergeResult);
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index 384095ba137..7a908a1238b 100755
--- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -253,7 +253,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
                 // simulate first
                 MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
                 if (result.hasConflicts()) {
-                    throw new MergeMappingException(result.buildConflicts());
+                    throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}");
                 }
                 // then apply for real
                 result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java b/core/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java
deleted file mode 100644
index 4a5e75fcb56..00000000000
--- a/core/src/main/java/org/elasticsearch/index/mapper/MergeMappingException.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.mapper;
-
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.rest.RestStatus;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Objects;
-
-/**
- *
- */
-public final class MergeMappingException extends MapperException {
-
-    private final String[] failures;
-
-    public MergeMappingException(String[] failures) {
-        super("Merge failed with failures {" + Arrays.toString(failures) + "}");
-        Objects.requireNonNull(failures, "failures must be non-null");
-        this.failures = failures;
-    }
-
-    public MergeMappingException(StreamInput in) throws IOException {
-        super(in);
-        failures = in.readStringArray();
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        out.writeStringArray(failures);
-    }
-
-    public String[] failures() {
-        return failures;
-    }
-
-    @Override
-    public RestStatus status() {
-        return RestStatus.BAD_REQUEST;
-    }
-}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
index d3cf998ef2c..5b4df635a34 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
@@ -605,7 +605,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
         if (!mergeResult.simulate()) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
index 3fba511fb52..7045ec2e58b 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
@@ -245,7 +245,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
index cb89cb4973b..a5c681d59a5 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
@@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
@@ -360,7 +359,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
index faa2b7e66a0..8348892e44a 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
@@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
@@ -191,7 +190,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java
index 1c33b66fbc8..0b57d866ddd 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java
@@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
@@ -389,7 +388,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java
index 350c8f5d668..84e6bde07ac 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java
@@ -39,7 +39,6 @@ import org.elasticsearch.index.mapper.ContentPath;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
@@ -298,7 +297,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
index 6d385875b18..7e784324f36 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
@@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 
@@ -472,7 +471,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
index 9de14810305..3166a683397 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
@@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -310,7 +309,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
             mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
         }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
index 195d736d323..16b6c4c56da 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
@@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -332,7 +331,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // do nothing here, no merging, but also no exception
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
index b82639e75b6..962332b5c4b 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
@@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -280,7 +279,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
         if (!mergeResult.simulate()) {
             if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
index d6221ae1db3..760259a1802 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
@@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -372,7 +371,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         super.merge(mergeWith, mergeResult);
         ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
         if (Objects.equals(parentType, fieldMergeWith.parentType) == false) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
index 43be36d177b..18d0645d2d5 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
@@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -250,7 +249,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // do nothing here, no merging, but also no exception
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
index 1e41f63e305..da3b8dbc5ab 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
@@ -44,7 +44,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -442,7 +441,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
         if (mergeResult.simulate()) {
             if (this.enabled != sourceMergeWith.enabled) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
index 54f631427cc..9a18befe622 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
@@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -259,7 +258,7 @@ public class TTLFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
         if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
             if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
index 05d3a24cd6e..468243d63cf 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
@@ -33,7 +33,6 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
@@ -380,7 +379,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
         super.merge(mergeWith, mergeResult);
         if (!mergeResult.simulate()) {
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
index 248718d46e4..d4acc3c5975 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
@@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -226,7 +225,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // do nothing here, no merging, but also no exception
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
index b3c3d983361..ef4c48e62e3 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
@@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -226,7 +225,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // do nothing here, no merging, but also no exception
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
index 42a58117687..292a622ab73 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
@@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -167,7 +166,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // nothing to do
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
index 9dc1a1d3dc7..88f89719050 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
@@ -464,7 +464,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
     }
 
     @Override
-    public void merge(final Mapper mergeWith, final MergeResult mergeResult) throws MergeMappingException {
+    public void merge(final Mapper mergeWith, final MergeResult mergeResult) {
         if (!(mergeWith instanceof ObjectMapper)) {
             mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
             return;
diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
index 6e04de80c81..21ee6de6b55 100644
--- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
@@ -51,7 +51,6 @@ import org.elasticsearch.index.AlreadyExpiredException;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.engine.IndexFailedEngineException;
 import org.elasticsearch.index.engine.RecoveryEngineException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.query.QueryShardException;
 import org.elasticsearch.index.shard.IllegalIndexShardStateException;
 import org.elasticsearch.index.shard.IndexShardState;
@@ -275,11 +274,6 @@ public class ExceptionSerializationTests extends ESTestCase {
         assertEquals(-3, alreadyExpiredException.now());
     }
 
-    public void testMergeMappingException() throws IOException {
-        MergeMappingException ex = serialize(new MergeMappingException(new String[]{"one", "two"}));
-        assertArrayEquals(ex.failures(), new String[]{"one", "two"});
-    }
-
     public void testActionNotFoundTransportException() throws IOException {
         ActionNotFoundTransportException ex = serialize(new ActionNotFoundTransportException("AACCCTION"));
         assertEquals("AACCCTION", ex.action());
@@ -725,7 +719,6 @@ public class ExceptionSerializationTests extends ESTestCase {
         ids.put(84, org.elasticsearch.transport.NodeDisconnectedException.class);
         ids.put(85, org.elasticsearch.index.AlreadyExpiredException.class);
         ids.put(86, org.elasticsearch.search.aggregations.AggregationExecutionException.class);
-        ids.put(87, org.elasticsearch.index.mapper.MergeMappingException.class);
         ids.put(88, org.elasticsearch.indices.InvalidIndexTemplateException.class);
         ids.put(89, org.elasticsearch.percolator.PercolateException.class);
         ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
index 61f058dab64..e5d08db8d9f 100755
--- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
@@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
@@ -220,7 +219,7 @@ public class ExternalMapper extends FieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // ignore this for now
     }
 
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java
index 9ca07a9c4b6..dae8bc67fda 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java
@@ -28,7 +28,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -67,7 +66,7 @@ public class ExternalMetadataMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         if (!(mergeWith instanceof ExternalMetadataMapper)) {
             mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this);
         }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
index ff557f45589..efe07615532 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
@@ -35,7 +35,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceToParse;
@@ -169,8 +168,8 @@ public class TTLMappingTests extends ESSingleNodeTestCase {
         try {
             client().admin().indices().preparePutMapping("testindex").setSource(mappingWithTtlDisabled).setType("type").get();
             fail();
-        } catch (MergeMappingException mme) {
-            assertThat(mme.getDetailedMessage(), containsString("_ttl cannot be disabled once it was enabled."));
+        } catch (IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString("_ttl cannot be disabled once it was enabled."));
         }
         GetMappingsResponse mappingsAfterUpdateResponse = client().admin().indices().prepareGetMappings("testindex").addTypes("type").get();
         assertThat(mappingsBeforeUpdateResponse.getMappings().get("testindex").get("type").source(), equalTo(mappingsAfterUpdateResponse.getMappings().get("testindex").get("type").source()));
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java
index bf97cec3c46..1edc2bb131a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java
@@ -24,7 +24,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.HashMap;
@@ -150,9 +149,9 @@ public class UpdateMappingOnClusterIT extends ESIntegTestCase {
         try {
             client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(mappingUpdate).get();
             fail();
-        } catch (MergeMappingException e) {
+        } catch (IllegalArgumentException e) {
             for (String errorMessage : errorMessages) {
-                assertThat(e.getDetailedMessage(), containsString(errorMessage));
+                assertThat(e.getMessage(), containsString(errorMessage));
             }
         }
         compareMappingOnNodes(mappingsBeforeUpdateResponse);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
index 7c15875bc11..b44081fc910 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.core.LongFieldMapper;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -38,6 +37,7 @@ import java.io.IOException;
 import java.util.LinkedHashMap;
 
 import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
+import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 
@@ -122,15 +122,15 @@ public class UpdateMappingTests extends ESSingleNodeTestCase {
         try {
             mapperService.merge("type", new CompressedXContent(update.string()), false, false);
             fail();
-        } catch (MergeMappingException e) {
-            // expected
+        } catch (IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString("Merge failed"));
         }
 
         try {
             mapperService.merge("type", new CompressedXContent(update.string()), false, false);
             fail();
-        } catch (MergeMappingException e) {
-            // expected
+        } catch (IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString("Merge failed"));
         }
 
         assertTrue(mapperService.documentMapper("type").mapping().root().getMapper("foo") instanceof LongFieldMapper);
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 68902fd22f8..57ba469a357 100644
--- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -31,7 +31,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.hamcrest.Matchers;
@@ -140,7 +139,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
             client().admin().indices().preparePutMapping("test").setType("type")
                     .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}").execute().actionGet();
             fail("Expected MergeMappingException");
-        } catch (MergeMappingException e) {
+        } catch (IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("mapper [body] of different type"));
         }
     }
@@ -154,7 +153,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
                     .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": true }}}}}").execute()
                     .actionGet();
             fail("Expected MergeMappingException");
-        } catch (MergeMappingException e) {
+        } catch (IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("mapper [body] has different [omit_norms]"));
         }
     }
diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
index e360649e919..4be2b36fbe6 100644
--- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
@@ -31,7 +31,6 @@ import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexModule;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.query.HasChildQueryBuilder;
 import org.elasticsearch.index.query.IdsQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -1176,7 +1175,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
                     .startObject("_parent").field("type", "parent").endObject()
                     .endObject().endObject()).get();
             fail();
-        } catch (MergeMappingException e) {
+        } catch (IllegalArgumentException e) {
             assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]"));
         }
     }
diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
index 2e4c08992c5..eb0e143c946 100644
--- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
+++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
@@ -602,7 +602,7 @@ public class AttachmentMapper extends FieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         // ignore this for now
     }
 
diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
index 4de98f57d2f..aaf46553a75 100644
--- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
+++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
@@ -28,7 +28,6 @@ import org.elasticsearch.index.analysis.NumericIntegerAnalyzer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -178,7 +177,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) {
         SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
         if (!mergeResult.simulate()) {
             if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {

From 5d0689581bac403cf615bd3c55cc60d9fa21b381 Mon Sep 17 00:00:00 2001
From: Christoph Büscher <christoph@elastic.co>
Date: Tue, 24 Nov 2015 14:06:00 +0100
Subject: [PATCH 41/41] Geo: Remove `translated` state from LineStringBuilder

The `translated` flag makes LineStringBuilder stateful: it gets set
to true under certain conditions when building a Shape or Geometry
from the ShapeBuilder. Building is therefore not idempotent, since
calling build() more than once on a LineStringBuilder can change the
builder itself. This PR fixes that by replacing the instance variable
with a local `translated` flag that is only updated internally during
the building process and is created anew on each subsequent call to
build() or buildGeometry().
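
For illustration, a minimal sketch of the before/after pattern (the
StatefulBuilder/StatelessBuilder classes below are hypothetical, not the
actual geo builders): an instance flag leaks state across build() calls,
while a flag created per invocation (here an AtomicBoolean, so that static
helpers can still mutate it, as in this PR) keeps build() idempotent.

    import java.util.concurrent.atomic.AtomicBoolean;

    class StatefulBuilder {
        private boolean translated = false; // survives across build() calls

        boolean build(double lonRange) {
            if (lonRange > 180) {
                translated = true;  // first build() mutates the builder ...
            }
            return translated;      // ... so later builds can see stale state
        }
    }

    class StatelessBuilder {
        boolean build(double lonRange) {
            // fresh holder per invocation; helpers may mutate it internally
            final AtomicBoolean translated = new AtomicBoolean(false);
            if (lonRange > 180) {
                translated.set(true);
            }
            return translated.get(); // same input, same output, every call
        }
    }

With the stateful variant, build(200) followed by build(10) returns true
twice; with the stateless variant the second call correctly returns false.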
---
 .../geo/builders/LineStringBuilder.java       |   2 -
 .../geo/builders/MultiPolygonBuilder.java     |   2 -
 .../common/geo/builders/PolygonBuilder.java   | 155 +++++++++++++++++-
 .../common/geo/builders/ShapeBuilder.java     | 147 +----------------
 4 files changed, 151 insertions(+), 155 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
index 4bf84ea8f50..c7ba9b72f55 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
@@ -34,8 +34,6 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
 
     public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
 
-    protected boolean translated = false;
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
index 7911ddff835..e7762e51b61 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
@@ -89,6 +89,4 @@ public class MultiPolygonBuilder extends ShapeBuilder {
             return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
         //note: ShapeCollection is probably faster than a Multi* geom.
     }
-
-
 }
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
index 94d8fc049d8..04540df27e9 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -38,6 +38,7 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
@@ -141,9 +142,10 @@ public class PolygonBuilder extends ShapeBuilder {
 
         Edge[] edges = new Edge[numEdges];
         Edge[] holeComponents = new Edge[holes.size()];
-        int offset = createEdges(0, orientation, shell, null, edges, 0);
+        final AtomicBoolean translated = new AtomicBoolean(false);
+        int offset = createEdges(0, orientation, shell, null, edges, 0, translated);
         for (int i = 0; i < holes.size(); i++) {
-            int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset);
+            int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset, translated);
             holeComponents[i] = edges[offset];
             offset += length;
         }
@@ -508,14 +510,157 @@ public class PolygonBuilder extends ShapeBuilder {
     }
 
     private static int createEdges(int component, Orientation orientation, LineStringBuilder shell,
-                                   LineStringBuilder hole,
-                                   Edge[] edges, int offset) {
+                                   LineStringBuilder hole, Edge[] edges, int offset, final AtomicBoolean translated) {
         // inner rings (holes) have an opposite direction than the outer rings
         // XOR will invert the orientation for outer ring cases (Truth Table: T/T = F, T/F = T, F/T = T, F/F = F)
         boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
         // set the points array accordingly (shell or hole)
         Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
-        Edge.ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1);
+        ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
         return points.length-1;
     }
+
+    /**
+     * Create a connected list of edges from an array of coordinates
+     *
+     * @param points
+     *            array of points
+     * @param offset
+     *            index of the first point
+     * @param length
+     *            number of points
+     * @return Array of edges
+     */
+    private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
+                                 Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
+        // calculate the direction of the points:
+        // find the point at the top of the set and check its
+        // neighbors' orientation. So direction is equivalent
+        // to clockwise/counterclockwise
+        final int top = top(points, offset, length);
+        final int prev = (offset + ((top + length - 1) % length));
+        final int next = (offset + ((top + 1) % length));
+        boolean orientation = points[offset + prev].x > points[offset + next].x;
+
+        // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
+        // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
+        // thus if orientation is computed as cw, the logic will translate points across dateline
+        // and convert to a right handed system
+
+        // compute the bounding box and calculate range
+        double[] range = range(points, offset, length);
+        final double rng = range[1] - range[0];
+        // translate the points if the following is true
+        //   1.  shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
+        //       (translation would result in a collapsed poly)
+        //   2.  the shell of the candidate hole has been translated (to preserve the coordinate system)
+        boolean incorrectOrientation = component == 0 && handedness != orientation;
+        if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) {
+            translate(points);
+            // flip the translation bit if the shell is being translated
+            if (component == 0) {
+                translated.set(true);
+            }
+            // correct the orientation post translation (ccw for shell, cw for holes)
+            if (component == 0 || (component != 0 && handedness == orientation)) {
+                orientation = !orientation;
+            }
+        }
+        return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
+    }
+
+    private static final int top(Coordinate[] points, int offset, int length) {
+        int top = 0; // the loop starts at 1 since top already points to 0
+        for (int i = 1; i < length; i++) {
+            if (points[offset + i].y < points[offset + top].y) {
+                top = i;
+            } else if (points[offset + i].y == points[offset + top].y) {
+                if (points[offset + i].x < points[offset + top].x) {
+                    top = i;
+                }
+            }
+        }
+        return top;
+    }
+
+    private static final double[] range(Coordinate[] points, int offset, int length) {
+        double minX = points[0].x;
+        double maxX = points[0].x;
+        double minY = points[0].y;
+        double maxY = points[0].y;
+        // compute the bounding coordinates (@todo: cleanup brute force)
+        for (int i = 1; i < length; ++i) {
+            if (points[offset + i].x < minX) {
+                minX = points[offset + i].x;
+            }
+            if (points[offset + i].x > maxX) {
+                maxX = points[offset + i].x;
+            }
+            if (points[offset + i].y < minY) {
+                minY = points[offset + i].y;
+            }
+            if (points[offset + i].y > maxY) {
+                maxY = points[offset + i].y;
+            }
+        }
+        return new double[] {minX, maxX, minY, maxY};
+    }
+
+    /**
+     * Concatenate a set of points to a polygon
+     *
+     * @param component
+     *            component id of the polygon
+     * @param direction
+     *            direction of the ring
+     * @param points
+     *            list of points to concatenate
+     * @param pointOffset
+     *            index of the first point
+     * @param edges
+     *            Array of edges to write the result to
+     * @param edgeOffset
+     *            index of the first edge in the result
+     * @param length
+     *            number of points to use
+     * @return the edges created
+     */
+    private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
+            int length) {
+        assert edges.length >= length+edgeOffset;
+        assert points.length >= length+pointOffset;
+        edges[edgeOffset] = new Edge(points[pointOffset], null);
+        for (int i = 1; i < length; i++) {
+            if (direction) {
+                edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
+                edges[edgeOffset + i].component = component;
+            } else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
+                edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
+                edges[edgeOffset + i - 1].component = component;
+            } else {
+                throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
+            }
+        }
+
+        if (direction) {
+            edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
+            edges[edgeOffset].component = component;
+        } else {
+            edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
+            edges[edgeOffset + length - 1].component = component;
+        }
+
+        return edges;
+    }
+
+    /**
+     * Transforms coordinates in the western hemisphere (-180:0) to a (180:360) range
+     */
+    private static void translate(Coordinate[] points) {
+        for (Coordinate c : points) {
+            if (c.x < 0) {
+                c.x += 2*DATELINE;
+            }
+        }
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
index 7f153a9197f..d8689ee737f 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -362,150 +362,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
             }
         }
 
-        private static final int top(Coordinate[] points, int offset, int length) {
-            int top = 0; // we start at 1 here since top points to 0
-            for (int i = 1; i < length; i++) {
-                if (points[offset + i].y < points[offset + top].y) {
-                    top = i;
-                } else if (points[offset + i].y == points[offset + top].y) {
-                    if (points[offset + i].x < points[offset + top].x) {
-                        top = i;
-                    }
-                }
-            }
-            return top;
-        }
-
-        private static final double[] range(Coordinate[] points, int offset, int length) {
-            double minX = points[0].x;
-            double maxX = points[0].x;
-            double minY = points[0].y;
-            double maxY = points[0].y;
-            // compute the bounding coordinates (@todo: cleanup brute force)
-            for (int i = 1; i < length; ++i) {
-                if (points[offset + i].x < minX) {
-                    minX = points[offset + i].x;
-                }
-                if (points[offset + i].x > maxX) {
-                    maxX = points[offset + i].x;
-                }
-                if (points[offset + i].y < minY) {
-                    minY = points[offset + i].y;
-                }
-                if (points[offset + i].y > maxY) {
-                    maxY = points[offset + i].y;
-                }
-            }
-            return new double[] {minX, maxX, minY, maxY};
-        }
-
-        /**
-         * Concatenate a set of points to a polygon
-         *
-         * @param component
-         *            component id of the polygon
-         * @param direction
-         *            direction of the ring
-         * @param points
-         *            list of points to concatenate
-         * @param pointOffset
-         *            index of the first point
-         * @param edges
-         *            Array of edges to write the result to
-         * @param edgeOffset
-         *            index of the first edge in the result
-         * @param length
-         *            number of points to use
-         * @return the edges creates
-         */
-        private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
-                int length) {
-            assert edges.length >= length+edgeOffset;
-            assert points.length >= length+pointOffset;
-            edges[edgeOffset] = new Edge(points[pointOffset], null);
-            for (int i = 1; i < length; i++) {
-                if (direction) {
-                    edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
-                    edges[edgeOffset + i].component = component;
-                } else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
-                    edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
-                    edges[edgeOffset + i - 1].component = component;
-                } else {
-                    throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
-                }
-            }
-
-            if (direction) {
-                edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
-                edges[edgeOffset].component = component;
-            } else {
-                edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
-                edges[edgeOffset + length - 1].component = component;
-            }
-
-            return edges;
-        }
-
-        /**
-         * Create a connected list of a list of coordinates
-         *
-         * @param points
-         *            array of point
-         * @param offset
-         *            index of the first point
-         * @param length
-         *            number of points
-         * @return Array of edges
-         */
-        protected static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
-                                     Coordinate[] points, int offset, Edge[] edges, int toffset, int length) {
-            // calculate the direction of the points:
-            // find the point a the top of the set and check its
-            // neighbors orientation. So direction is equivalent
-            // to clockwise/counterclockwise
-            final int top = top(points, offset, length);
-            final int prev = (offset + ((top + length - 1) % length));
-            final int next = (offset + ((top + 1) % length));
-            boolean orientation = points[offset + prev].x > points[offset + next].x;
-
-            // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
-            // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
-            // thus if orientation is computed as cw, the logic will translate points across dateline
-            // and convert to a right handed system
-
-            // compute the bounding box and calculate range
-            double[] range = range(points, offset, length);
-            final double rng = range[1] - range[0];
-            // translate the points if the following is true
-            //   1.  shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
-            //       (translation would result in a collapsed poly)
-            //   2.  the shell of the candidate hole has been translated (to preserve the coordinate system)
-            boolean incorrectOrientation = component == 0 && handedness != orientation;
-            if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (shell.translated && component != 0)) {
-                translate(points);
-                // flip the translation bit if the shell is being translated
-                if (component == 0) {
-                    shell.translated = true;
-                }
-                // correct the orientation post translation (ccw for shell, cw for holes)
-                if (component == 0 || (component != 0 && handedness == orientation)) {
-                    orientation = !orientation;
-                }
-            }
-            return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
-        }
-
-        /**
-         * Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
-         */
-        protected static void translate(Coordinate[] points) {
-            for (Coordinate c : points) {
-                if (c.x < 0) {
-                    c.x += 2*DATELINE;
-                }
-            }
-        }
-
         /**
          * Set the intersection of this line segment to the given position
          *
@@ -517,7 +373,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
             return intersect = position(coordinate, next.coordinate, position);
         }
 
-        public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
+        protected static Coordinate position(Coordinate p1, Coordinate p2, double position) {
             if (position == 0) {
                 return p1;
             } else if (position == 1) {
@@ -542,7 +398,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
         public int compare(Edge o1, Edge o2) {
             return Double.compare(o1.intersect.y, o2.intersect.y);
         }
-
     }
 
     public static enum Orientation {