Add upgrader to upgrade old indices to new naming convention

This commit is contained in:
Areek Zillur 2016-03-14 23:13:06 -04:00
parent dfec4547ea
commit 35f7cfb6c0
3 changed files with 531 additions and 0 deletions


@@ -0,0 +1,154 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
/**
* Renames index folders from {index.name} to {index.uuid}
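* For example (illustrative names only, not from this commit), an index folder named
* <code>twitter</code> would be renamed to its UUID folder, such as <code>Yhey9VQhTWWlT9Tmzm6blg</code>.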
*/
public class IndexFolderUpgrader {
private final NodeEnvironment nodeEnv;
private final Settings settings;
private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class);
private final MetaDataStateFormat<IndexMetaData> indexStateFormat = readOnlyIndexMetaDataStateFormat();
/**
* Creates a new upgrader instance
* @param settings node settings
* @param nodeEnv the node env to operate on
*/
IndexFolderUpgrader(Settings settings, NodeEnvironment nodeEnv) {
this.settings = settings;
this.nodeEnv = nodeEnv;
}
/**
* Moves the index folder found in <code>source</code> to <code>target</code>
*/
void upgrade(final Index index, final Path source, final Path target) throws IOException {
boolean success = false;
try {
Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
success = true;
} catch (NoSuchFileException | FileNotFoundException exception) {
// thrown when the source is non-existent because the folder was renamed
// by another node (shared FS) after we checked if the target exists
logger.error("multiple nodes trying to upgrade [{}] in parallel, retry upgrading with single node",
exception, target);
throw exception;
} finally {
if (success) {
logger.info("{} moved from [{}] to [{}]", index, source, target);
logger.trace("{} syncing directory [{}]", index, target);
IOUtils.fsync(target, true);
}
}
}
/**
* Renames the index folder <code>indexFolderName</code> found in the node paths and in the custom path
* iff {@link #needsUpgrade(Index, String)} is true.
* The index folder in the custom path is renamed first, followed by the index folders in each node path.
*/
void upgrade(final String indexFolderName) throws IOException {
for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) {
final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName);
final IndexMetaData indexMetaData = indexStateFormat.loadLatestState(logger, indexFolderPath);
if (indexMetaData != null) {
final Index index = indexMetaData.getIndex();
if (needsUpgrade(index, indexFolderName)) {
logger.info("{} upgrading [{}] to new naming convention", index, indexFolderPath);
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
if (indexSettings.hasCustomDataPath()) {
// we rename the index folder in the custom path before renaming it in any node path,
// so that the index state stays under a not-yet-upgraded index folder, which lets us
// resume renaming after an incomplete upgrade.
final Path customLocationSource = nodeEnv.resolveBaseCustomLocation(indexSettings)
.resolve(indexFolderName);
final Path customLocationTarget = customLocationSource.resolveSibling(index.getUUID());
// we rename the folder in the custom path only the first time we encounter a state
// in a node path that needs upgrading; it is a no-op for subsequent node paths
if (Files.exists(customLocationSource) // might not exist if no data was written for this index
&& Files.exists(customLocationTarget) == false) {
upgrade(index, customLocationSource, customLocationTarget);
} else {
logger.info("[{}] no upgrade needed - already upgraded", customLocationTarget);
}
}
upgrade(index, indexFolderPath, indexFolderPath.resolveSibling(index.getUUID()));
} else {
logger.debug("[{}] no upgrade needed - already upgraded", indexFolderPath);
}
} else {
logger.warn("[{}] no index state found - ignoring", indexFolderPath);
}
}
}
/**
* Upgrades all indices found under <code>nodeEnv</code>. Already upgraded indices are ignored.
*/
public static void upgradeIndicesIfNeeded(final Settings settings, final NodeEnvironment nodeEnv) throws IOException {
final IndexFolderUpgrader upgrader = new IndexFolderUpgrader(settings, nodeEnv);
for (String indexFolderName : nodeEnv.availableIndexFolders()) {
upgrader.upgrade(indexFolderName);
}
}
static boolean needsUpgrade(Index index, String indexFolderName) {
return indexFolderName.equals(index.getUUID()) == false;
}
static MetaDataStateFormat<IndexMetaData> readOnlyIndexMetaDataStateFormat() {
// NOTE: XContentType param is not used as we use the format read from the serialized index state
return new MetaDataStateFormat<IndexMetaData>(XContentType.SMILE, MetaStateService.INDEX_STATE_FILE_PREFIX) {
@Override
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
return IndexMetaData.Builder.fromXContent(parser);
}
};
}
}
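
For context, a minimal sketch (assumed wiring, not part of this commit) of how a caller could run the upgrade once during node startup, before any shards are opened:

// Minimal usage sketch: the Settings and the already-open NodeEnvironment are assumed to be
// supplied by the node bootstrap; upgradeIndicesIfNeeded skips already-upgraded folders.
void upgradeOnStartup(Settings settings, NodeEnvironment nodeEnv) throws IOException {
    IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnv);
}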


@@ -0,0 +1,366 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util;
import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.Version;
import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityIT;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.shard.ShardStateMetaData;
import org.elasticsearch.test.ESTestCase;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import static org.hamcrest.core.Is.is;
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
public class IndexFolderUpgraderTests extends ESTestCase {
private static MetaDataStateFormat<IndexMetaData> indexMetaDataStateFormat =
new MetaDataStateFormat<IndexMetaData>(XContentType.SMILE, MetaStateService.INDEX_STATE_FILE_PREFIX) {
@Override
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
IndexMetaData.Builder.toXContent(state, builder, ToXContent.EMPTY_PARAMS);
}
@Override
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
return IndexMetaData.Builder.fromXContent(parser);
}
};
/**
* tests custom data paths are upgraded
*/
public void testUpgradeCustomDataPath() throws IOException {
Path customPath = createTempDir();
final Settings nodeSettings = Settings.builder()
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean())
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build();
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
Settings settings = Settings.builder()
.put(nodeSettings)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
int numIdxFiles = randomIntBetween(1, 5);
int numTranslogFiles = randomIntBetween(1, 5);
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv);
helper.upgrade(indexSettings.getIndex().getName());
checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
}
}
/**
* tests that the upgrade can be resumed on a partially upgraded index, simulating a crash while upgrading
*/
public void testPartialUpgradeCustomDataPath() throws IOException {
Path customPath = createTempDir();
final Settings nodeSettings = Settings.builder()
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean())
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build();
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
Settings settings = Settings.builder()
.put(nodeSettings)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
int numIdxFiles = randomIntBetween(1, 5);
int numTranslogFiles = randomIntBetween(1, 5);
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv) {
@Override
void upgrade(Index index, Path source, Path target) throws IOException {
if (randomBoolean()) {
throw new FileNotFoundException("simulated");
}
}
};
// simulate a crash mid-upgrade: the overridden move randomly throws and never actually renames
try {
helper.upgrade(index.getName());
} catch (IOException e) {
assertTrue(e instanceof FileNotFoundException);
}
helper = new IndexFolderUpgrader(settings, nodeEnv);
// try to upgrade again
helper.upgrade(indexSettings.getIndex().getName());
checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
}
}
public void testUpgrade() throws IOException {
final Settings nodeSettings = Settings.builder()
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build();
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
Settings settings = Settings.builder()
.put(nodeSettings)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
int numIdxFiles = randomIntBetween(1, 5);
int numTranslogFiles = randomIntBetween(1, 5);
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv);
helper.upgrade(indexSettings.getIndex().getName());
checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
}
}
public void testUpgradeIndices() throws IOException {
final Settings nodeSettings = Settings.builder()
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build();
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
Map<IndexSettings, Tuple<Integer, Integer>> indexSettingsMap = new HashMap<>();
final int numIndices = randomIntBetween(2, 5);
for (int i = 0; i < numIndices; i++) {
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
Settings settings = Settings.builder()
.put(nodeSettings)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build();
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
Tuple<Integer, Integer> fileCounts = new Tuple<>(randomIntBetween(1, 5), randomIntBetween(1, 5));
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
indexSettingsMap.put(indexSettings, fileCounts);
writeIndex(nodeEnv, indexSettings, fileCounts.v1(), fileCounts.v2());
}
IndexFolderUpgrader.upgradeIndicesIfNeeded(nodeSettings, nodeEnv);
for (Map.Entry<IndexSettings, Tuple<Integer, Integer>> entry : indexSettingsMap.entrySet()) {
checkIndex(nodeEnv, entry.getKey(), entry.getValue().v1(), entry.getValue().v2());
}
}
}
/**
* Run upgrade on a real bwc index
*/
public void testUpgradeRealIndex() throws IOException, URISyntaxException {
List<Path> indexes = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) {
for (Path path : stream) {
indexes.add(path);
}
}
CollectionUtil.introSort(indexes, (o1, o2) -> o1.getFileName().compareTo(o2.getFileName()));
final Path path = randomFrom(indexes);
final String indexName = path.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
// decompress the index
try (InputStream stream = Files.newInputStream(path)) {
TestUtil.unzip(stream, unzipDir);
}
// the unzipped data dir must exist and contain exactly one cluster
assertTrue(Files.exists(unzipDataDir));
Path[] list = FileSystemUtils.files(unzipDataDir);
if (list.length != 1) {
throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length);
}
// the bwc scripts pack the indices under this path
Path src = list[0].resolve("nodes/0/indices/" + indexName);
assertTrue("[" + path + "] missing index dir: " + src.toString(), Files.exists(src));
final Path indicesPath = randomFrom(nodeEnvironment.nodePaths()).indicesPath;
logger.info("--> injecting index [{}] into [{}]", indexName, indicesPath);
OldIndexBackwardsCompatibilityIT.copyIndex(logger, src, indexName, indicesPath);
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);
// ensure old index folder is deleted
Set<String> indexFolders = nodeEnvironment.availableIndexFolders();
assertEquals(indexFolders.size(), 1);
// ensure index metadata is moved
IndexMetaData indexMetaData = indexMetaDataStateFormat.loadLatestState(logger,
nodeEnvironment.resolveIndexFolder(indexFolders.iterator().next()));
assertNotNull(indexMetaData);
Index index = indexMetaData.getIndex();
assertEquals(index.getName(), indexName);
Set<ShardId> shardIds = nodeEnvironment.findAllShardIds(index);
// ensure all shards are moved
assertEquals(shardIds.size(), indexMetaData.getNumberOfShards());
for (ShardId shardId : shardIds) {
final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnvironment, shardId,
new IndexSettings(indexMetaData, Settings.EMPTY));
final Path translog = shardPath.resolveTranslog();
final Path idx = shardPath.resolveIndex();
final Path state = shardPath.getShardStatePath().resolve(MetaDataStateFormat.STATE_DIR_NAME);
assertTrue(shardPath.exists());
assertTrue(Files.exists(translog));
assertTrue(Files.exists(idx));
assertTrue(Files.exists(state));
}
}
}
public void testNeedsUpgrade() throws IOException {
final Index index = new Index("foo", Strings.randomBase64UUID());
IndexMetaData indexState = IndexMetaData.builder(index.getName())
.settings(Settings.builder()
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.build();
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
indexMetaDataStateFormat.write(indexState, 1, nodeEnvironment.indexPaths(index));
assertFalse(IndexFolderUpgrader.needsUpgrade(index, index.getUUID()));
}
}
private void checkIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings,
int numIdxFiles, int numTranslogFiles) throws IOException {
final Index index = indexSettings.getIndex();
// ensure index state can be loaded
IndexMetaData loadLatestState = indexMetaDataStateFormat.loadLatestState(logger, nodeEnv.indexPaths(index));
assertNotNull(loadLatestState);
assertEquals(loadLatestState.getIndex(), index);
for (int shardId = 0; shardId < indexSettings.getNumberOfShards(); shardId++) {
// ensure shard path can be loaded
ShardPath targetShardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, shardId), indexSettings);
assertNotNull(targetShardPath);
// ensure shard contents are copied over
final Path translog = targetShardPath.resolveTranslog();
final Path idx = targetShardPath.resolveIndex();
// ensure index and translog files are copied over
assertEquals(numTranslogFiles, FileSystemUtils.files(translog).length);
assertEquals(numIdxFiles, FileSystemUtils.files(idx).length);
Path[] files = FileSystemUtils.files(translog);
final HashSet<Path> translogFiles = new HashSet<>(Arrays.asList(files));
for (int i = 0; i < numTranslogFiles; i++) {
final String name = Integer.toString(i);
assertTrue(translogFiles.contains(translog.resolve(name + ".translog")));
byte[] content = Files.readAllBytes(translog.resolve(name + ".translog"));
assertEquals(name, new String(content, StandardCharsets.UTF_8));
}
Path[] indexFileList = FileSystemUtils.files(idx);
final HashSet<Path> idxFiles = new HashSet<>(Arrays.asList(indexFileList));
for (int i = 0; i < numIdxFiles; i++) {
final String name = Integer.toString(i);
assertTrue(idxFiles.contains(idx.resolve(name + ".tst")));
byte[] content = Files.readAllBytes(idx.resolve(name + ".tst"));
assertEquals(name, new String(content, StandardCharsets.UTF_8));
}
}
}
private void writeIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings,
int numIdxFiles, int numTranslogFiles) throws IOException {
NodeEnvironment.NodePath[] nodePaths = nodeEnv.nodePaths();
Path[] oldIndexPaths = new Path[nodePaths.length];
for (int i = 0; i < nodePaths.length; i++) {
oldIndexPaths[i] = nodePaths[i].indicesPath.resolve(indexSettings.getIndex().getName());
}
indexMetaDataStateFormat.write(indexSettings.getIndexMetaData(), 1, oldIndexPaths);
for (int id = 0; id < indexSettings.getNumberOfShards(); id++) {
Path oldIndexPath = randomFrom(oldIndexPaths);
ShardId shardId = new ShardId(indexSettings.getIndex(), id);
if (indexSettings.hasCustomDataPath()) {
Path customIndexPath = nodeEnv.resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getIndex().getName());
writeShard(shardId, customIndexPath, numIdxFiles, numTranslogFiles);
} else {
writeShard(shardId, oldIndexPath, numIdxFiles, numTranslogFiles);
}
ShardStateMetaData state = new ShardStateMetaData(true, indexSettings.getUUID(), AllocationId.newInitializing());
ShardStateMetaData.FORMAT.write(state, 1, oldIndexPath.resolve(String.valueOf(shardId.getId())));
}
}
private void writeShard(ShardId shardId, Path indexLocation,
final int numIdxFiles, final int numTranslogFiles) throws IOException {
Path oldShardDataPath = indexLocation.resolve(String.valueOf(shardId.getId()));
final Path translogPath = oldShardDataPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
final Path idxPath = oldShardDataPath.resolve(ShardPath.INDEX_FOLDER_NAME);
Files.createDirectories(translogPath);
Files.createDirectories(idxPath);
for (int i = 0; i < numIdxFiles; i++) {
String filename = Integer.toString(i);
try (BufferedWriter w = Files.newBufferedWriter(idxPath.resolve(filename + ".tst"),
StandardCharsets.UTF_8)) {
w.write(filename);
}
}
for (int i = 0; i < numTranslogFiles; i++) {
String filename = Integer.toString(i);
try (BufferedWriter w = Files.newBufferedWriter(translogPath.resolve(filename + ".translog"),
StandardCharsets.UTF_8)) {
w.write(filename);
}
}
}
}


@@ -4,6 +4,17 @@
This section discusses the changes that you need to be aware of when migrating
your application to Elasticsearch 5.0.
[float]
=== Indices created before 5.0
Elasticsearch 5.0 can read indices created in version 2.0 and above. If any of
your indices were created before 2.0, you will need to upgrade to the latest
2.x version of Elasticsearch first, either to upgrade those indices or to
delete them. Elasticsearch will not start in the presence of old indices. To
upgrade 2.x indices, first start a node that has access to all the data
folders and let it upgrade all the indices before starting up the rest of the
cluster.
[IMPORTANT]
.Reindex indices from Elasticsearch 1.x or before
=========================================