MINOR: Some Cleanups around Store (#36139)

* Move `canOpenIndex`, which is only used in tests, to the test classpath
* Simplify `org.elasticsearch.index.store.Store#renameTempFilesSafe` (see the ordering sketch below)
* Delete some dead methods
Author: Armin Braun, 2018-12-03 11:21:42 +01:00 (committed by GitHub)
parent f763037b03
commit 328d022ddd
5 changed files with 64 additions and 56 deletions
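
For context on the `renameTempFilesSafe` simplification, here is a minimal, self-contained sketch of the ordering the new lambda comparator produces: regular files sort ahead of `segments_N` files, so the commit file is renamed last, just like a Lucene commit. The class name and file names below are made up for illustration, and `Arrays.sort` stands in for Lucene's `ArrayUtil.timSort`.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class RenameOrderSketch {

    // stand-in for org.apache.lucene.index.IndexFileNames.SEGMENTS
    private static final String SEGMENTS = "segments";

    public static void main(String[] args) {
        // temp file name -> target file name, like the tempFileMap passed to renameTempFilesSafe
        Map<String, String> tempFileMap = new HashMap<>();
        tempFileMap.put("recovery.tmp.segments_5", "segments_5");
        tempFileMap.put("recovery.tmp._0.cfs", "_0.cfs");
        tempFileMap.put("recovery.tmp._0.si", "_0.si");

        final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[0]);

        // same comparator logic as the lambda in the diff: non-segments files first, segments_N last
        Arrays.sort(entries, (o1, o2) -> {
            String left = o1.getValue();
            String right = o2.getValue();
            if (left.startsWith(SEGMENTS) || right.startsWith(SEGMENTS)) {
                if (left.startsWith(SEGMENTS) == false) {
                    return -1;
                } else if (right.startsWith(SEGMENTS) == false) {
                    return 1;
                }
            }
            return left.compareTo(right);
        });

        for (Map.Entry<String, String> entry : entries) {
            // segments_5 is printed (and would be renamed) last
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}
```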

Store.java

@@ -91,7 +91,6 @@ import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -300,21 +299,18 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {
// this works just like a lucene commit - we rename all temp files and once we successfully
// renamed all the segments we rename the commit to ensure we don't leave half baked commits behind.
final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[tempFileMap.size()]);
ArrayUtil.timSort(entries, new Comparator<Map.Entry<String, String>>() {
@Override
public int compare(Map.Entry<String, String> o1, Map.Entry<String, String> o2) {
String left = o1.getValue();
String right = o2.getValue();
if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
return -1;
} else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
return 1;
}
final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[0]);
ArrayUtil.timSort(entries, (o1, o2) -> {
String left = o1.getValue();
String right = o2.getValue();
if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
return -1;
} else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
return 1;
}
return left.compareTo(right);
}
return left.compareTo(right);
});
metadataLock.writeLock().lock();
// we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't
@@ -454,22 +450,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return MetadataSnapshot.EMPTY;
}
/**
* Returns <code>true</code> iff the given location contains an index and the index
* can be successfully opened. This includes reading the segment infos and possible
* corruption markers.
*/
public static boolean canOpenIndex(Logger logger, Path indexLocation,
ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
try {
tryOpenIndex(indexLocation, shardId, shardLocker, logger);
} catch (Exception ex) {
logger.trace(() -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
return false;
}
return true;
}
/**
* Tries to open an index for the given location. This includes reading the
* segment infos and possible corruption markers. If the index can not
@@ -961,7 +941,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
private static final String DEL_FILE_EXTENSION = "del"; // legacy delete file
private static final String LIV_FILE_EXTENSION = "liv"; // lucene 5 delete file
private static final String FIELD_INFOS_FILE_EXTENSION = "fnm";
private static final String SEGMENT_INFO_EXTENSION = "si";
/**
@@ -1015,12 +994,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// only treat del files as per-commit files; fnm files are generational but only for upgradable DV
perCommitStoreFiles.add(meta);
} else {
List<StoreFileMetaData> perSegStoreFiles = perSegment.get(segmentId);
if (perSegStoreFiles == null) {
perSegStoreFiles = new ArrayList<>();
perSegment.put(segmentId, perSegStoreFiles);
}
perSegStoreFiles.add(meta);
perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(meta);
}
}
final ArrayList<StoreFileMetaData> identicalFiles = new ArrayList<>();
@@ -1072,13 +1046,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return commitUserData.get(Engine.HISTORY_UUID_KEY);
}
/**
* returns the translog uuid the store points at
*/
public String getTranslogUUID() {
return commitUserData.get(Translog.TRANSLOG_UUID_KEY);
}
/**
* Returns true iff this metadata contains the given file.
*/
@@ -1598,15 +1565,14 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}
private void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException {
private static void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException {
final Map<String, String> userData = getUserData(writer);
userData.putAll(keysToUpdate);
writer.setLiveCommitData(userData.entrySet());
writer.commit();
}
private Map<String, String> getUserData(IndexWriter writer) {
private static Map<String, String> getUserData(IndexWriter writer) {
final Map<String, String> userData = new HashMap<>();
writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue()));
return userData;
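
One more cleanup in this file (shown in an earlier hunk) replaces the manual get/null-check/put grouping of per-segment files with `Map.computeIfAbsent`. Below is a standalone sketch of that idiom, with `StoreFileMetaData` simplified to plain file-name strings and made-up segment ids.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupBySegmentSketch {
    public static void main(String[] args) {
        // segment id -> files belonging to that segment
        Map<String, List<String>> perSegment = new HashMap<>();
        String[][] files = { { "_0", "_0.cfs" }, { "_0", "_0.si" }, { "_1", "_1.cfs" } };

        for (String[] file : files) {
            String segmentId = file[0];
            String fileName = file[1];
            // before the cleanup: get(), null-check, put() a fresh list, then add()
            // after: computeIfAbsent creates the list lazily and returns it in one step
            perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(fileName);
        }

        System.out.println(perSegment); // e.g. {_0=[_0.cfs, _0.si], _1=[_1.cfs]}
    }
}
```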

IndexShardTests.java

@@ -106,6 +106,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.store.StoreUtils;
import org.elasticsearch.index.translog.TestTranslog;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogStats;
@@ -261,7 +262,7 @@ public class IndexShardTests extends IndexShardTestCase {
ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath());
assertEquals(shardStateMetaData, getShardStateMetadata(shard));
// but index can't be opened for a failed shard
assertThat("store index should be corrupted", Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(),
assertThat("store index should be corrupted", StoreUtils.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(),
(shardId, lockTimeoutMS) -> new DummyShardLock(shardId)),
equalTo(false));
}

ShardUtilsTests.java

@@ -28,7 +28,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@@ -64,8 +63,4 @@ public class ShardUtilsTests extends ESTestCase {
}
IOUtils.close(writer, dir);
}
public static Engine getShardEngine(IndexShard shard) {
return shard.getEngine();
}
}

StoreTests.java

@@ -925,17 +925,17 @@ public class StoreTests extends ESTestCase {
IndexWriterConfig iwc = newIndexWriterConfig();
Path tempDir = createTempDir();
final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
IndexWriter writer = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.addDocument(doc);
writer.commit();
writer.close();
assertTrue(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
assertTrue(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
Store store = new Store(shardId, INDEX_SETTINGS, dir, new DummyShardLock(shardId));
store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
store.close();
}

StoreUtils.java (new file)

@@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.store;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.shard.ShardId;
import java.nio.file.Path;
public final class StoreUtils {
/**
* Returns {@code true} iff the given location contains an index and the index
* can be successfully opened. This includes reading the segment infos and possible
* corruption markers.
*/
public static boolean canOpenIndex(Logger logger, Path indexLocation,
ShardId shardId, NodeEnvironment.ShardLocker shardLocker) {
try {
Store.tryOpenIndex(indexLocation, shardId, shardLocker, logger);
} catch (Exception ex) {
logger.trace(() -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex);
return false;
}
return true;
}
}