diff --git a/core/pom.xml b/core/pom.xml index f6bdebdd4d3..426d4c4d125 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -18,8 +18,6 @@ - - unshaded /usr/share/elasticsearch /usr/share/elasticsearch/bin @@ -415,9 +413,12 @@ - true + true + shaded + false true true + false com.google.guava:guava diff --git a/core/src/main/assemblies/common-bin.xml b/core/src/main/assemblies/common-bin.xml index 49a00eab4ee..011a9c8b2f1 100644 --- a/core/src/main/assemblies/common-bin.xml +++ b/core/src/main/assemblies/common-bin.xml @@ -11,6 +11,20 @@ com.spatial4j:spatial4j com.vividsolutions:jts org.codehaus.groovy:groovy-all + com.google.guava:guava + com.carrotsearch:hppc + com.fasterxml.jackson.core:jackson-core + com.fasterxml.jackson.dataformat:jackson-dataformat-smile + com.fasterxml.jackson.dataformat:jackson-dataformat-yaml + com.fasterxml.jackson.dataformat:jackson-dataformat-cbor + joda-time:joda-time + org.joda:joda-convert + io.netty:netty + com.ning:compress-lzf + com.github.spullara.mustache.java:compiler + com.tdunning:t-digest + org.apache.commons:commons-lang3 + commons-cli:commons-cli diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index e0e7a847d45..f59e271c04f 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -544,6 +544,7 @@ public class GatewayAllocator extends AbstractComponent { @Override public void onFailure(String source, Throwable t) { + rerouting.set(false); logger.warn("failed to perform reroute post async fetch for {}", t, source); } }); diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java index 572cda21d09..fae398e1a90 100644 --- 
a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java @@ -19,15 +19,10 @@ package org.elasticsearch.index.snapshots.blobstore; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.io.ByteStreams; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.*; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; @@ -902,13 +897,14 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements /// now, go over and clean files that are in the store, but were not in the snapshot try { for (String storeFile : store.directory().listAll()) { - if (!Store.isChecksum(storeFile) && !snapshotFiles.containPhysicalIndexFile(storeFile)) { - try { - store.deleteQuiet("restore", storeFile); - store.directory().deleteFile(storeFile); - } catch (IOException e) { - logger.warn("[{}] failed to delete file [{}] during snapshot cleanup", snapshotId, storeFile); - } + if (Store.isAutogenerated(storeFile) || snapshotFiles.containPhysicalIndexFile(storeFile)) { + continue; //skip write.lock, checksum files and files that exist in the snapshot + } + try { + store.deleteQuiet("restore", storeFile); + store.directory().deleteFile(storeFile); + } catch (IOException e) { + logger.warn("[{}] failed to delete file [{}] during snapshot cleanup", snapshotId, storeFile); } } } catch (IOException e) { diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java 
b/core/src/main/java/org/elasticsearch/index/store/Store.java index d92128a319c..2617e95b418 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -586,7 +586,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref try (Lock writeLock = Lucene.acquireWriteLock(directory)) { final StoreDirectory dir = directory; for (String existingFile : dir.listAll()) { - if (existingFile.equals(IndexWriter.WRITE_LOCK_NAME) || Store.isChecksum(existingFile) || sourceMetaData.contains(existingFile)) { + if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) { continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum) } try { @@ -1206,11 +1206,19 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref public static final String CHECKSUMS_PREFIX = "_checksums-"; - public static final boolean isChecksum(String name) { + public static boolean isChecksum(String name) { // TODO can we drop .cks return name.startsWith(CHECKSUMS_PREFIX) || name.endsWith(".cks"); // bwcompat - .cks used to be a previous checksum file } + /** + * Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup. + * This includes write lock and checksum files + */ + public static boolean isAutogenerated(String name) { + return IndexWriter.WRITE_LOCK_NAME.equals(name) || isChecksum(name); + } + /** + * Produces a string representation of the given digest value. 
*/ diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java index 2034dc41b81..68d682ddf7b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.util.concurrent.ExecutionException; @@ -164,7 +163,6 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest { } @Test @Slow - @TestLogging("cluster.routing.allocation.allocator:TRACE") public void multipleNodesShutdownNonMasterNodes() throws Exception { Settings settings = settingsBuilder() .put("discovery.type", "zen") diff --git a/docs/reference/indices/upgrade.asciidoc b/docs/reference/indices/upgrade.asciidoc index 046dde2fe8a..bb6747daf09 100644 --- a/docs/reference/indices/upgrade.asciidoc +++ b/docs/reference/indices/upgrade.asciidoc @@ -1,9 +1,64 @@ [[indices-upgrade]] == Upgrade -The upgrade API allows to upgrade one or more indices to the latest format -through an API. The upgrade process converts any segments written -with previous formats. +The upgrade API allows to upgrade one or more indices to the latest Lucene +format through an API. The upgrade process converts any segments written with +older formats. + +.When to use the `upgrade` API +************************************************** + +Newer versions of Lucene often come with a new index format which provides bug +fixes and performance improvements. 
In order to take advantage of these +improvements, the segments in each shard need to be rewritten using the latest +Lucene format. + +.Automatic upgrading + +Indices that are actively being written to will automatically write new +segments in the latest format. The background merge process which combines +multiple small segments into a single bigger segment will also write the new +merged segment in the latest format. + +.Optional manual upgrades + +Some old segments may never be merged away because they are already too big to +be worth merging, and indices that no longer receive changes will not be +upgraded automatically. Upgrading segments is not required for most +Elasticsearch upgrades because it can read older formats from the current and +previous major version of Lucene. + +You can, however, choose to upgrade old segments manually to take advantage of +the latest format. The `upgrade` API will rewrite any old segments in the +latest Lucene format. It can be run on one index, multiple or all indices, so +you can control when it is run and how many indices it should upgrade. + +.When you must use the `upgrade` API + +Elasticsearch can only read formats from the current and previous major +version of Lucene. For instance, Elasticsearch 2.x (Lucene 5) can read disk +formats from Elasticsearch 0.90 and 1.x (Lucene 4), but not from Elasticsearch +0.20 and before (Lucene 3). + +In fact, an Elasticsearch 2.0 cluster will refuse to start if any indices +created before Elasticsearch 0.90 are present, and it will refuse to open them +if they are imported as dangling indices later on. It will not be possible to +restore an index created with Elasticsearch 0.20.x and before into a 2.0 +cluster. + +These ancient indices must either be deleted or upgraded before migrating to +Elasticsearch 2.0. Upgrading will: + +* Rewrite old segments in the latest Lucene format. 
+* Add the `index.version.minimum_compatible` setting to the index, to mark it as + 2.0 compatible coming[1.6.0]. + +Instead of upgrading all segments that weren't written with the most recent +version of Lucene, you can choose to do the minimum work required before +moving to Elasticsearch 2.0, by specifying the `only_ancient_segments` option, +which will only rewrite segments written by Lucene 3. + +************************************************** [float] === Start an upgrade diff --git a/pom.xml b/pom.xml index 764444c2741..af5fc845ece 100644 --- a/pom.xml +++ b/pom.xml @@ -507,7 +507,7 @@ org.apache.maven.plugins maven-assembly-plugin - 2.5.4 + 2.5.5 org.apache.maven.plugins @@ -760,8 +760,6 @@ true false - - false jsr166e/** @@ -792,8 +790,6 @@ true false - - false jdk-unsafe