Merge branch 'master' into feature/query-refactoring
commit f1426abe3a
@@ -18,8 +18,6 @@
     <properties>
-        <!-- testing is currently done unshaded :( -->
-        <elasticsearch.thirdparty.config>unshaded</elasticsearch.thirdparty.config>
         <!-- Properties used for building RPM & DEB packages (see common/packaging.properties) -->
         <packaging.elasticsearch.home.dir>/usr/share/elasticsearch</packaging.elasticsearch.home.dir>
         <packaging.elasticsearch.bin.dir>/usr/share/elasticsearch/bin</packaging.elasticsearch.bin.dir>
@@ -415,9 +413,12 @@
             </execution>
         </executions>
         <configuration>
-            <shadeTestJar>true</shadeTestJar>
+            <shadedArtifactAttached>true</shadedArtifactAttached>
+            <shadedClassifierName>shaded</shadedClassifierName>
+            <shadeTestJar>false</shadeTestJar>
             <minimizeJar>true</minimizeJar>
             <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+            <createDependencyReducedPom>false</createDependencyReducedPom>
             <artifactSet>
                 <includes>
                     <include>com.google.guava:guava</include>
@@ -11,6 +11,20 @@
             <include>com.spatial4j:spatial4j</include>
             <include>com.vividsolutions:jts</include>
             <include>org.codehaus.groovy:groovy-all</include>
+            <include>com.google.guava:guava</include>
+            <include>com.carrotsearch:hppc</include>
+            <include>com.fasterxml.jackson.core:jackson-core</include>
+            <include>com.fasterxml.jackson.dataformat:jackson-dataformat-smile</include>
+            <include>com.fasterxml.jackson.dataformat:jackson-dataformat-yaml</include>
+            <include>com.fasterxml.jackson.dataformat:jackson-dataformat-cbor</include>
+            <include>joda-time:joda-time</include>
+            <include>org.joda:joda-convert</include>
+            <include>io.netty:netty</include>
+            <include>com.ning:compress-lzf</include>
+            <include>com.github.spullara.mustache.java:compiler</include>
+            <include>com.tdunning:t-digest</include>
+            <include>org.apache.commons:commons-lang3</include>
+            <include>commons-cli:commons-cli</include>
         </includes>
     </dependencySet>
     <dependencySet>
@@ -544,6 +544,7 @@ public class GatewayAllocator extends AbstractComponent {

             @Override
             public void onFailure(String source, Throwable t) {
+                rerouting.set(false);
                 logger.warn("failed to perform reroute post async fetch for {}", t, source);
             }
         });
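Note: the one-liner above matters because `rerouting` is evidently an AtomicBoolean used to collapse concurrent reroute requests; if a failed cluster-state update never cleared it, no further reroutes could ever be scheduled. A minimal sketch of that guard pattern follows — every name outside the diff is hypothetical, not the actual GatewayAllocator code:

import java.util.concurrent.atomic.AtomicBoolean;

public class RerouteGuardSketch {
    private final AtomicBoolean rerouting = new AtomicBoolean();

    void maybeReroute() {
        // only one reroute may be in flight at a time
        if (!rerouting.compareAndSet(false, true)) {
            return;
        }
        try {
            submitReroute(); // hypothetical async submission
        } catch (RuntimeException e) {
            rerouting.set(false); // mirror of the fix: clear the flag on failure
            throw e;
        }
    }

    void submitReroute() { /* submit cluster state update task */ }
}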
@@ -19,15 +19,10 @@

 package org.elasticsearch.index.snapshots.blobstore;

-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.io.ByteStreams;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexFormatTooNewException;
-import org.apache.lucene.index.IndexFormatTooOldException;
-import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.*;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
@@ -902,13 +897,14 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
         /// now, go over and clean files that are in the store, but were not in the snapshot
         try {
             for (String storeFile : store.directory().listAll()) {
-                if (!Store.isChecksum(storeFile) && !snapshotFiles.containPhysicalIndexFile(storeFile)) {
-                    try {
-                        store.deleteQuiet("restore", storeFile);
-                        store.directory().deleteFile(storeFile);
-                    } catch (IOException e) {
-                        logger.warn("[{}] failed to delete file [{}] during snapshot cleanup", snapshotId, storeFile);
-                    }
+                if (Store.isAutogenerated(storeFile) || snapshotFiles.containPhysicalIndexFile(storeFile)) {
+                    continue; //skip write.lock, checksum files and files that exist in the snapshot
+                }
+                try {
+                    store.deleteQuiet("restore", storeFile);
+                    store.directory().deleteFile(storeFile);
+                } catch (IOException e) {
+                    logger.warn("[{}] failed to delete file [{}] during snapshot cleanup", snapshotId, storeFile);
                 }
             }
         } catch (IOException e) {
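Note: the rewrite above is more than a guard-clause inversion of the old condition; by switching from `isChecksum` to the new `isAutogenerated` helper it also stops deleting the Lucene write lock. A minimal self-contained sketch — stand-in predicates, with `containPhysicalIndexFile` replaced by a plain list lookup — showing the only input where the two delete decisions disagree:

import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;

public class CleanupPredicateSketch {
    static boolean isChecksum(String name) { return name.startsWith("_checksums-") || name.endsWith(".cks"); }
    static boolean isAutogenerated(String name) { return "write.lock".equals(name) || isChecksum(name); }

    public static void main(String[] args) {
        List<String> snapshot = Arrays.asList("_0.cfs"); // files the snapshot owns
        Predicate<String> oldDelete = f -> !isChecksum(f) && !snapshot.contains(f);
        Predicate<String> newDelete = f -> !isAutogenerated(f) && !snapshot.contains(f);
        for (String f : Arrays.asList("_0.cfs", "_1.cfs", "_checksums-1", "write.lock")) {
            System.out.printf("%-14s old=%-5b new=%b%n", f, oldDelete.test(f), newDelete.test(f));
        }
        // only "write.lock" differs: old=true (deleted!), new=false (kept)
    }
}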
@@ -586,7 +586,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
         try (Lock writeLock = Lucene.acquireWriteLock(directory)) {
             final StoreDirectory dir = directory;
             for (String existingFile : dir.listAll()) {
-                if (existingFile.equals(IndexWriter.WRITE_LOCK_NAME) || Store.isChecksum(existingFile) || sourceMetaData.contains(existingFile)) {
+                if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                     continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
                 }
                 try {
@@ -1206,11 +1206,19 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref

     public static final String CHECKSUMS_PREFIX = "_checksums-";

-    public static final boolean isChecksum(String name) {
+    public static boolean isChecksum(String name) {
         // TODO can we drowp .cks
         return name.startsWith(CHECKSUMS_PREFIX) || name.endsWith(".cks"); // bwcomapt - .cks used to be a previous checksum file
     }

+    /**
+     * Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup.
+     * This includes write lock and checksum files
+     */
+    public static boolean isAutogenerated(String name) {
+        return IndexWriter.WRITE_LOCK_NAME.equals(name) || isChecksum(name);
+    }
+
     /**
      * Produces a string representation of the given digest value.
      */
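Note: a minimal sketch of the contract the new helper unifies across the two call sites above, runnable with assertions enabled (`java -ea`); the package for `Store` is assumed here to be `org.elasticsearch.index.store`:

import org.apache.lucene.index.IndexWriter;
import org.elasticsearch.index.store.Store; // assumed package

public class IsAutogeneratedSketch {
    public static void main(String[] args) {
        assert Store.isAutogenerated(IndexWriter.WRITE_LOCK_NAME); // the Lucene write lock
        assert Store.isAutogenerated("_checksums-123");            // current checksum files
        assert Store.isAutogenerated("foo.cks");                   // legacy .cks checksum files
        assert !Store.isAutogenerated("_0.cfs");                   // ordinary segment files stay deletable
        System.out.println("isAutogenerated contract holds");
    }
}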
@@ -31,7 +31,6 @@ import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.junit.Test;

 import java.util.concurrent.ExecutionException;
@@ -164,7 +163,6 @@ public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
     }

     @Test @Slow
-    @TestLogging("cluster.routing.allocation.allocator:TRACE")
     public void multipleNodesShutdownNonMasterNodes() throws Exception {
         Settings settings = settingsBuilder()
                 .put("discovery.type", "zen")
@@ -1,9 +1,64 @@
 [[indices-upgrade]]
 == Upgrade

-The upgrade API allows to upgrade one or more indices to the latest format
-through an API. The upgrade process converts any segments written
-with previous formats.
+The upgrade API allows to upgrade one or more indices to the latest Lucene
+format through an API. The upgrade process converts any segments written with
+older formats.
+
+.When to use the `upgrade` API
+**************************************************
+
+Newer versions of Lucene often come with a new index format which provides bug
+fixes and performance improvements. In order to take advantage of these
+improvements, the segments in each shard need to be rewritten using the latest
+Lucene format.
+
+.Automatic upgrading
+
+Indices that are actively being written to will automatically write new
+segments in the latest format. The background merge process which combines
+multiple small segments into a single bigger segment will also write the new
+merged segment in the latest format.
+
+.Optional manual upgrades
+
+Some old segments may never be merged away because they are already too big to
+be worth merging, and indices that no longer receive changes will not be
+upgraded automatically. Upgrading segments is not required for most
+Elasticsearch upgrades because it can read older formats from the current and
+previous major version of Lucene.
+
+You can, however, choose to upgrade old segments manually to take advantage of
+the latest format. The `upgrade` API will rewrite any old segments in the
+latest Lucene format. It can be run on one index, multiple or all indices, so
+you can control when it is run and how many indices it should upgrade.
+
+.When you must use the `upgrade` API
+
+Elasticsearch can only read formats from the current and previous major
+version of Lucene. For instance, Elasticsearch 2.x (Lucene 5) can read disk
+formats from Elasticsearch 0.90 and 1.x (Lucene 4), but not from Elasticsearch
+0.20 and before (Lucene 3).
+
+In fact, an Elasticsearch 2.0 cluster will refuse to start if any indices
+created before Elasticsearch 0.90 are present, and it will refuse to open them
+if they are imported as dangling indices later on. It will not be possible to
+restore an index created with Elasticsearch 0.20.x and before into a 2.0
+cluster.
+
+These ancient indices must either be deleted or upgraded before migrating to
+Elasticsearch 2.0. Upgrading will:
+
+* Rewrite old segments in the latest Lucene format.
+* Add the `index.version.minimum_compatible` setting to the index, to mark it as
+  2.0 compatible coming[1.6.0].
+
+Instead of upgrading all segments that weren't written with the most recent
+version of Lucene, you can choose to do the minimum work required before
+moving to Elasticsearch 2.0, by specifying the `only_ancient_segments` option,
+which will only rewrite segments written by Lucene 3.
+
+**************************************************

 [float]
 === Start an upgrade
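Note: the docs above describe a REST endpoint, so as a minimal sketch the upgrade can be triggered with a plain HTTP POST using only JDK classes. Host, port, and index name are placeholders; `only_ancient_segments` is the option named in the text:

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class UpgradeRequestSketch {
    public static void main(String[] args) throws Exception {
        // rewrite only the Lucene 3 segments of a hypothetical "test_index"
        URL url = new URL("http://localhost:9200/test_index/_upgrade?only_ancient_segments=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        try (InputStream in = conn.getInputStream()) {
            System.out.println(new String(in.readAllBytes(), "UTF-8")); // upgrade status JSON
        }
    }
}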
--- a/pom.xml
+++ b/pom.xml
@@ -507,7 +507,7 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-assembly-plugin</artifactId>
-                <version>2.5.4</version>
+                <version>2.5.5</version>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
@@ -760,8 +760,6 @@
                 <internalRuntimeForbidden>true</internalRuntimeForbidden>
                 <!-- if the used Java version is too new, don't fail, just do nothing: -->
                 <failOnUnsupportedJava>false</failOnUnsupportedJava>
-                <!-- Temporary until we stabilize builds -->
-                <failOnUnresolvableSignatures>false</failOnUnresolvableSignatures>
                 <excludes>
                     <exclude>jsr166e/**</exclude>
                 </excludes>
@@ -792,8 +790,6 @@
                 <internalRuntimeForbidden>true</internalRuntimeForbidden>
                 <!-- if the used Java version is too new, don't fail, just do nothing: -->
                 <failOnUnsupportedJava>false</failOnUnsupportedJava>
-                <!-- Temporary until we stabilize builds -->
-                <failOnUnresolvableSignatures>false</failOnUnresolvableSignatures>
                 <bundledSignatures>
                     <!-- This will automatically choose the right signatures based on 'targetVersion': -->
                     <bundledSignature>jdk-unsafe</bundledSignature>