[DEPRECATE] SimpleFS in favor of NIOFS (#1073)
Lucene 9 removes support for the SimpleFS file system format. This commit deprecates the SimpleFS format in favor of NIOFS.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
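For code outside the deprecated SMB store types, the migration is a direct swap of the Lucene directory implementation: wherever a SimpleFSDirectory was opened on a path, an NIOFSDirectory is opened instead, with the same createOutput/openInput API. A minimal, self-contained sketch of that usage (the scratch path and file name here are hypothetical and not part of this commit):

import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.NIOFSDirectory;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class NioFsDirectoryExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical scratch directory; the commit itself operates on node config and data paths.
        Path dir = Paths.get("build", "niofs-example");
        Files.createDirectories(dir);
        Files.deleteIfExists(dir.resolve("example.bin"));

        // NIOFSDirectory takes the place of the removed SimpleFSDirectory.
        try (NIOFSDirectory directory = new NIOFSDirectory(dir)) {
            try (IndexOutput out = directory.createOutput("example.bin", IOContext.DEFAULT)) {
                out.writeByte((byte) 42); // write a single byte, as the keystore tests do
            }
            try (IndexInput in = directory.openInput("example.bin", IOContext.READONCE)) {
                System.out.println("read byte: " + in.readByte());
            }
        }
    }
}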
parent 93a621f54e
commit ff7e7904ca
@@ -35,7 +35,7 @@ package org.opensearch.common.settings;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.opensearch.common.Randomness;
 import org.opensearch.core.internal.io.IOUtils;
 import org.opensearch.env.Environment;
@@ -192,7 +192,7 @@ public class KeyStoreWrapperTests extends OpenSearchTestCase {

     public void testFailWhenCannotConsumeSecretStream() throws Exception {
         Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
             CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
             indexOutput.writeByte((byte) 0); // No password
@@ -220,7 +220,7 @@ public class KeyStoreWrapperTests extends OpenSearchTestCase {

     public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception {
         Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
             CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
             indexOutput.writeByte((byte) 0); // No password
@@ -249,7 +249,7 @@ public class KeyStoreWrapperTests extends OpenSearchTestCase {

     public void testFailWhenSecretStreamNotConsumed() throws Exception {
         Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
             CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
             indexOutput.writeByte((byte) 0); // No password
@@ -276,7 +276,7 @@ public class KeyStoreWrapperTests extends OpenSearchTestCase {

     public void testFailWhenEncryptedBytesStreamIsNotConsumed() throws Exception {
         Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
             CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
             indexOutput.writeByte((byte) 0); // No password
@@ -362,7 +362,7 @@ public class KeyStoreWrapperTests extends OpenSearchTestCase {
     public void testBackcompatV1() throws Exception {
         assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm());
         Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         try (IndexOutput output = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
             CodecUtil.writeHeader(output, "opensearch.keystore", 1);
             output.writeByte((byte) 0); // hasPassword = false
@@ -393,7 +393,7 @@ public class KeyStoreWrapperTests extends OpenSearchTestCase {
     public void testBackcompatV2() throws Exception {
         assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm());
         Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         byte[] fileBytes = new byte[20];
         random().nextBytes(fileBytes);
         try (IndexOutput output = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.store;
+
+import org.opensearch.common.settings.Settings;
+
+/**
+ * Index Settings Tests for NIO FileSystem as index store type.
+ */
+public class SmbNIOFsTests extends AbstractAzureFsTestCase {
+    @Override
+    public Settings indexSettings() {
+        return Settings.builder()
+            .put(super.indexSettings())
+            .put("index.store.type", "smb_nio_fs")
+            .build();
+    }
+}
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.store.smbniofs;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.NIOFSDirectory;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.store.FsDirectoryFactory;
+import org.opensearch.index.store.SmbDirectoryWrapper;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+/**
+ * Factory to create a new NIO File System type directory accessible as a SMB share
+ */
+public final class SmbNIOFsDirectoryFactory extends FsDirectoryFactory {
+
+    @Override
+    protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException {
+        return new SmbDirectoryWrapper(new NIOFSDirectory(location, lockFactory));
+    }
+}
@@ -35,6 +35,8 @@ package org.opensearch.index.store.smbsimplefs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockFactory;
 import org.apache.lucene.store.SimpleFSDirectory;
+import org.opensearch.common.logging.DeprecationLogger;
+import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.store.FsDirectoryFactory;
 import org.opensearch.index.store.SmbDirectoryWrapper;
@@ -42,10 +44,20 @@ import org.opensearch.index.store.SmbDirectoryWrapper;
 import java.io.IOException;
 import java.nio.file.Path;

 /**
  * Factory to create a new Simple File System type directory accessible as a SMB share
+ *
+ * @deprecated use {@link org.opensearch.index.store.smbniofs.SmbNIOFsDirectoryFactory} instead
  */
+@Deprecated
 public final class SmbSimpleFsDirectoryFactory extends FsDirectoryFactory {

+    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(SmbSimpleFsDirectoryFactory.class);
+
     @Override
     protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException {
+        DEPRECATION_LOGGER.deprecate(IndexModule.Type.SIMPLEFS.getSettingsKey(), IndexModule.Type.SIMPLEFS.getSettingsKey()
+            + " is deprecated and will be removed in 2.0");
         return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory));
     }
 }
@@ -32,7 +32,10 @@

 package org.opensearch.plugin.store.smb;

+import org.opensearch.common.logging.DeprecationLogger;
+import org.opensearch.index.IndexModule;
 import org.opensearch.index.store.smbmmapfs.SmbMmapFsDirectoryFactory;
+import org.opensearch.index.store.smbniofs.SmbNIOFsDirectoryFactory;
 import org.opensearch.index.store.smbsimplefs.SmbSimpleFsDirectoryFactory;
 import org.opensearch.plugins.IndexStorePlugin;
 import org.opensearch.plugins.Plugin;
@@ -43,11 +46,16 @@ import java.util.Map;

 public class SMBStorePlugin extends Plugin implements IndexStorePlugin {

+    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(SMBStorePlugin.class);
+
     @Override
     public Map<String, DirectoryFactory> getDirectoryFactories() {
         final Map<String, DirectoryFactory> indexStoreFactories = new HashMap<>(2);
         indexStoreFactories.put("smb_mmap_fs", new SmbMmapFsDirectoryFactory());
+        DEPRECATION_LOGGER.deprecate(IndexModule.Type.SIMPLEFS.getSettingsKey(), IndexModule.Type.SIMPLEFS.getSettingsKey()
+            + " is deprecated and will be removed in 2.0");
         indexStoreFactories.put("smb_simple_fs", new SmbSimpleFsDirectoryFactory());
+        indexStoreFactories.put("smb_nio_fs", new SmbNIOFsDirectoryFactory());
         return Collections.unmodifiableMap(indexStoreFactories);
     }

@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.store;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.NIOFSDirectory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+/**
+ * SMB Tests using NIO FileSystem as index store type.
+ */
+public class SmbNIOFSDirectoryTests extends OpenSearchBaseDirectoryTestCase {
+
+    @Override
+    protected Directory getDirectory(Path file) throws IOException {
+        return new SmbDirectoryWrapper(new NIOFSDirectory(file));
+    }
+
+    @Override
+    public void testCreateOutputForExistingFile() throws IOException {
+        /**
+         * This test is disabled because {@link SmbDirectoryWrapper} opens existing file
+         * with an explicit StandardOpenOption.TRUNCATE_EXISTING option.
+         */
+    }
+}
@@ -32,7 +32,7 @@

 package org.opensearch.cluster.routing;

-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;

 import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplanation;
 import org.opensearch.action.admin.indices.stats.ShardStats;
@@ -151,7 +151,7 @@ public class AllocationIdIT extends OpenSearchIntegTestCase {
         });

         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
-        try(Store store = new Store(shardId, indexSettings, new SimpleFSDirectory(indexPath), new DummyShardLock(shardId))) {
+        try(Store store = new Store(shardId, indexSettings, new NIOFSDirectory(indexPath), new DummyShardLock(shardId))) {
            store.removeCorruptionMarker();
         }
         node1 = internalCluster().startNode(node1DataPathSettings);
@@ -229,7 +229,7 @@ public class AllocationIdIT extends OpenSearchIntegTestCase {
     }

     private void putFakeCorruptionMarker(IndexSettings indexSettings, ShardId shardId, Path indexPath) throws IOException {
-        try(Store store = new Store(shardId, indexSettings, new SimpleFSDirectory(indexPath), new DummyShardLock(shardId))) {
+        try(Store store = new Store(shardId, indexSettings, new NIOFSDirectory(indexPath), new DummyShardLock(shardId))) {
            store.markStoreCorrupted(new IOException("fake ioexception"));
        }
     }
@@ -76,6 +76,7 @@ import org.opensearch.common.xcontent.NamedXContentRegistry;
 import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.env.Environment;
 import org.opensearch.index.Index;
+import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexSettings;
@@ -783,9 +784,20 @@ public class MetadataCreateIndexService {
                 "Please do not specify value for setting [index.soft_deletes.enabled] of index [" + request.index() + "].");
         }
         validateTranslogRetentionSettings(indexSettings);
+        validateStoreTypeSettings(indexSettings);

         return indexSettings;
     }

+    public static void validateStoreTypeSettings(Settings settings) {
+        // deprecate simplefs store type:
+        if (IndexModule.Type.SIMPLEFS.match(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings))) {
+            DEPRECATION_LOGGER.deprecate("store_type_setting",
+                "[simplefs] is deprecated and will be removed in 2.0. Use [niofs], which offers equal or better performance, " +
+                    "or other file systems instead.");
+        }
+    }
+
     static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) {
         // TODO: this logic can be removed when the current major version is 8
         assert Version.CURRENT.major == 1;
@@ -250,8 +250,12 @@ public class MetadataUpdateSettingsService {

         if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings) ||
             IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) {
+            Settings indexSettings;
             for (String index : actualIndices) {
-                MetadataCreateIndexService.validateTranslogRetentionSettings(metadataBuilder.get(index).getSettings());
+                indexSettings = metadataBuilder.get(index).getSettings();
+                MetadataCreateIndexService.validateTranslogRetentionSettings(indexSettings);
+                // validate storeType for deprecating index stores
+                MetadataCreateIndexService.validateStoreTypeSettings(indexSettings);
             }
         }
         boolean changed = false;
@@ -40,7 +40,7 @@ import org.apache.lucene.store.ChecksumIndexInput;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.util.SetOnce;
 import org.opensearch.cli.ExitCodes;
 import org.opensearch.cli.UserException;
@@ -236,7 +236,7 @@ public class KeyStoreWrapper implements SecureSettings {
             return null;
         }

-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         try (IndexInput indexInput = directory.openInput(keystoreFileName, IOContext.READONCE)) {
             ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput);
             final int formatVersion;
@@ -502,7 +502,7 @@ public class KeyStoreWrapper implements SecureSettings {
     public synchronized void save(Path configDir, char[] password) throws Exception {
         ensureOpen();

-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+        NIOFSDirectory directory = new NIOFSDirectory(configDir);
         // write to tmp file first, then overwrite
         String tmpFile = KEYSTORE_FILENAME + ".tmp";
         try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) {
@@ -42,8 +42,8 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.store.NativeFSLockFactory;
-import org.apache.lucene.store.SimpleFSDirectory;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
@@ -501,7 +501,7 @@ public final class NodeEnvironment implements Closeable {
             // resolve the directory the shard actually lives in
             Path p = shardPaths[i].resolve("index");
             // open a directory (will be immediately closed) on the shard's location
-            dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING));
+            dirs[i] = new NIOFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING));
             // create a lock for the "write.lock" file
             try {
                 locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
@@ -42,7 +42,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.lucene.store.IndexOutputOutputStream;
@@ -323,7 +323,7 @@ public abstract class MetadataStateFormat<T> {
     }

     protected Directory newDirectory(Path dir) throws IOException {
-        return new SimpleFSDirectory(dir);
+        return new NIOFSDirectory(dir);
     }

@@ -56,7 +56,7 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
@@ -228,7 +228,7 @@ public class PersistedClusterStateService {
      */
     public static void deleteAll(Path[] dataPaths) throws IOException {
         for (Path dataPath : dataPaths) {
-            Lucene.cleanLuceneIndex(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)));
+            Lucene.cleanLuceneIndex(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)));
         }
     }

@@ -236,8 +236,8 @@ public class PersistedClusterStateService {
     Directory createDirectory(Path path) throws IOException {
         // it is possible to disable the use of MMapDirectory for indices, and it may be surprising to users that have done so if we still
         // use a MMapDirectory here, which might happen with FSDirectory.open(path). Concurrency is of no concern here so a
-        // SimpleFSDirectory is fine:
-        return new SimpleFSDirectory(path);
+        // NIOFSDirectory is fine:
+        return new NIOFSDirectory(path);
     }

     public Path[] getDataPaths() {
@@ -277,7 +277,7 @@ public class PersistedClusterStateService {
         for (final Path dataPath : dataPaths) {
             final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME);
             if (Files.exists(indexPath)) {
-                try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
+                try (DirectoryReader reader = DirectoryReader.open(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
                     final Map<String, String> userData = reader.getIndexCommit().getUserData();
                     assert userData.get(NODE_VERSION_KEY) != null;

@@ -308,12 +308,12 @@ public class PersistedClusterStateService {
         for (final Path dataPath : dataPaths) {
             final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME);
             if (Files.exists(indexPath)) {
-                try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
+                try (DirectoryReader reader = DirectoryReader.open(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
                     final Map<String, String> userData = reader.getIndexCommit().getUserData();
                     assert userData.get(NODE_VERSION_KEY) != null;

                     try (IndexWriter indexWriter =
-                        createIndexWriter(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) {
+                        createIndexWriter(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) {
                         final Map<String, String> commitData = new HashMap<>(userData);
                         commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id));
                         indexWriter.setLiveCommitData(commitData.entrySet());
@@ -48,6 +48,7 @@ import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.CheckedFunction;
 import org.opensearch.common.TriFunction;
 import org.opensearch.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
@@ -139,6 +140,8 @@ public final class IndexModule {
     public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING =
         Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope);

+    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndexModule.class);
+
     private final IndexSettings indexSettings;
     private final AnalysisRegistry analysisRegistry;
     private final EngineFactory engineFactory;
@@ -365,9 +368,15 @@ public final class IndexModule {
         FS("fs");

         private final String settingsKey;
+        private final boolean deprecated;

         Type(final String settingsKey) {
-            this.settingsKey = settingsKey;
+            this(settingsKey, false);
+        }
+
+        Type(final String settingsKey, final boolean deprecated) {
+            this.settingsKey = settingsKey;
+            this.deprecated = deprecated;
         }

         private static final Map<String, Type> TYPES;
@@ -384,11 +393,18 @@ public final class IndexModule {
             return this.settingsKey;
         }

+        public boolean isDeprecated() {
+            return deprecated;
+        }
+
         public static Type fromSettingsKey(final String key) {
             final Type type = TYPES.get(key);
             if (type == null) {
                 throw new IllegalArgumentException("no matching store type for [" + key + "]");
             }
+            if (type.isDeprecated()) {
+                DEPRECATION_LOGGER.deprecate(type.getSettingsKey(), " is deprecated and will be removed in 2.0");
+            }
             return type;
         }

@@ -404,8 +420,6 @@ public final class IndexModule {
     public static Type defaultStoreType(final boolean allowMmap) {
         if (allowMmap && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
             return Type.HYBRIDFS;
-        } else if (Constants.WINDOWS) {
-            return Type.SIMPLEFS;
         } else {
             return Type.NIOFS;
         }
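The IndexModule changes above attach a deprecated flag to each store type and emit a deprecation warning whenever such a type is resolved from its settings key. A standalone sketch of the same pattern, using hypothetical names (StoreType, StoreTypeDemo) and a plain stderr message in place of OpenSearch's DeprecationLogger:

import java.util.HashMap;
import java.util.Map;

enum StoreType {
    NIOFS("niofs"),
    SIMPLEFS("simplefs", true); // deprecated store type

    private static final Map<String, StoreType> TYPES = new HashMap<>();
    static {
        for (StoreType t : values()) {
            TYPES.put(t.settingsKey, t);
        }
    }

    private final String settingsKey;
    private final boolean deprecated;

    StoreType(String settingsKey) {
        this(settingsKey, false);
    }

    StoreType(String settingsKey, boolean deprecated) {
        this.settingsKey = settingsKey;
        this.deprecated = deprecated;
    }

    static StoreType fromSettingsKey(String key) {
        StoreType type = TYPES.get(key);
        if (type == null) {
            throw new IllegalArgumentException("no matching store type for [" + key + "]");
        }
        if (type.deprecated) {
            // stand-in for DeprecationLogger#deprecate in the real code
            System.err.println("[" + key + "] is deprecated and will be removed in 2.0");
        }
        return type;
    }
}

public class StoreTypeDemo {
    public static void main(String[] args) {
        System.out.println(StoreType.fromSettingsKey("niofs"));    // resolves silently
        System.out.println(StoreType.fromSettingsKey("simplefs")); // resolves, but warns
    }
}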
@@ -44,6 +44,7 @@ import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.store.NativeFSLockFactory;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.store.SimpleFSLockFactory;
+import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.core.internal.io.IOUtils;
@@ -60,6 +61,8 @@ import java.util.Set;

 public class FsDirectoryFactory implements IndexStorePlugin.DirectoryFactory {

+    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(FsDirectoryFactory.class);
+
     public static final Setting<LockFactory> INDEX_LOCK_FACTOR_SETTING = new Setting<>("index.store.fs.fs_lock", "native", (s) -> {
         switch (s) {
             case "native":
@@ -104,6 +107,9 @@ public class FsDirectoryFactory implements IndexStorePlugin.DirectoryFactory {
             case MMAPFS:
                 return setPreload(new MMapDirectory(location, lockFactory), lockFactory, preLoadExtensions);
             case SIMPLEFS:
+                DEPRECATION_LOGGER.deprecate(IndexModule.Type.SIMPLEFS.getSettingsKey(), IndexModule.Type.SIMPLEFS.getSettingsKey()
+                    + " is no longer supported and will be removed in 2.0. Use [" + IndexModule.Type.NIOFS.getSettingsKey()
+                    + "], which offers equal or better performance, instead.");
                 return new SimpleFSDirectory(location, lockFactory);
             case NIOFS:
                 return new NIOFSDirectory(location, lockFactory);
@@ -58,7 +58,7 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -463,7 +463,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
     public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
                                                         Logger logger) throws IOException {
         try (ShardLock lock = shardLocker.lock(shardId, "read metadata snapshot", TimeUnit.SECONDS.toMillis(5));
-             Directory dir = new SimpleFSDirectory(indexLocation)) {
+             Directory dir = new NIOFSDirectory(indexLocation)) {
             failIfCorrupted(dir);
             return new MetadataSnapshot(null, dir, logger);
         } catch (IndexNotFoundException ex) {
@@ -484,7 +484,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
     public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
                                     Logger logger) throws IOException, ShardLockObtainFailedException {
         try (ShardLock lock = shardLocker.lock(shardId, "open index", TimeUnit.SECONDS.toMillis(5));
-             Directory dir = new SimpleFSDirectory(indexLocation)) {
+             Directory dir = new NIOFSDirectory(indexLocation)) {
             failIfCorrupted(dir);
             SegmentInfos segInfo = Lucene.readSegmentInfos(dir);
             logger.trace("{} loaded segment info [{}]", shardId, segInfo);
@@ -41,8 +41,8 @@ import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.store.OutputStreamIndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
 import org.opensearch.common.io.Channels;
 import org.opensearch.index.seqno.SequenceNumbers;

@@ -191,7 +191,7 @@ final class Checkpoint {
     }

     public static Checkpoint read(Path path) throws IOException {
-        try (Directory dir = new SimpleFSDirectory(path.getParent())) {
+        try (Directory dir = new NIOFSDirectory(path.getParent())) {
             try (IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) {
                 // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
                 CodecUtil.checksumEntireFile(indexInput);
@@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.analysis.hunspell.Dictionary;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.opensearch.OpenSearchException;
 import org.opensearch.core.internal.io.IOUtils;
 import org.opensearch.common.io.FileSystemUtils;
@@ -208,7 +208,7 @@ public class HunspellService {

             affixStream = Files.newInputStream(affixFiles[0]);

-            try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
+            try (Directory tmp = new NIOFSDirectory(env.tmpFile())) {
                 return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
             }

@@ -68,6 +68,7 @@ import org.opensearch.common.xcontent.NamedXContentRegistry;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.env.Environment;
 import org.opensearch.index.Index;
+import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.mapper.MapperService;
@@ -983,12 +984,23 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase {
                 true
             );

-                final List<String> validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true);
+            final List<String> validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true);
             assertThat(validationErrors.size(), is(1));
             assertThat(validationErrors.get(0), is("expected [index.lifecycle.name] to be private but it was not"));
         }));
     }

+    public void testDeprecatedSimpleFSStoreSettings() {
+        request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test");
+        final Settings.Builder settings = Settings.builder();
+        settings.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.SIMPLEFS.getSettingsKey());
+        request.settings(settings.build());
+        aggregateIndexSettings(ClusterState.EMPTY_STATE, request, Settings.EMPTY, null, Settings.EMPTY,
+            IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), Collections.emptySet());
+        assertWarnings("[simplefs] is deprecated and will be removed in 2.0. Use [niofs], which offers equal " +
+            "or better performance, or other file systems instead.");
+    }
+
     private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) {
         IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*");
         configurator.accept(builder);
@@ -38,7 +38,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.MockDirectoryWrapper;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.opensearch.cluster.ClusterModule;
 import org.opensearch.cluster.metadata.Metadata;
@@ -193,7 +193,7 @@ public class MetadataStateFormatTests extends OpenSearchTestCase {
     }

     public static void corruptFile(Path fileToCorrupt, Logger logger) throws IOException {
-        try (SimpleFSDirectory dir = new SimpleFSDirectory(fileToCorrupt.getParent())) {
+        try (NIOFSDirectory dir = new NIOFSDirectory(fileToCorrupt.getParent())) {
            long checksumBeforeCorruption;
            try (IndexInput input = dir.openInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
                checksumBeforeCorruption = CodecUtil.retrieveChecksum(input);
@@ -245,7 +245,7 @@ public class MetadataStateFormatTests extends OpenSearchTestCase {

     private static void ensureOnlyOneStateFile(Path[] paths) throws IOException {
         for (Path path : paths) {
-            try (Directory dir = new SimpleFSDirectory(path.resolve(MetadataStateFormat.STATE_DIR_NAME))) {
+            try (Directory dir = new NIOFSDirectory(path.resolve(MetadataStateFormat.STATE_DIR_NAME))) {
                 assertThat(dir.listAll().length, equalTo(1));
             }
         }
@@ -41,7 +41,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
@@ -507,7 +507,7 @@ public class PersistedClusterStateServiceTests extends OpenSearchTestCase {
         }

         final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
-        try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+        try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
             final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
             indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
             try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
@@ -538,8 +538,8 @@ public class PersistedClusterStateServiceTests extends OpenSearchTestCase {

         final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
         final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
-        try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
-             Directory dupDirectory = new SimpleFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+        try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
+             Directory dupDirectory = new NIOFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
             try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                 indexWriter.addIndexes(dupDirectory);
                 indexWriter.commit();
@@ -583,8 +583,8 @@ public class PersistedClusterStateServiceTests extends OpenSearchTestCase {

         final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
         final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
-        try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
-             Directory dupDirectory = new SimpleFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+        try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
+             Directory dupDirectory = new NIOFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
             try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                 indexWriter.deleteDocuments(new Term("type", "global")); // do not duplicate global metadata
                 indexWriter.addIndexes(dupDirectory);
@@ -161,6 +161,8 @@ public class FsDirectoryFactoryTests extends OpenSearchTestCase {
                 assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory);
                 break;
             case SIMPLEFS:
+                assertWarnings("simplefs is no longer supported and will be removed in 2.0. Use [niofs], which offers equal "
+                    + "or better performance, instead.");
                 assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory);
                 break;
             case FS:
@@ -42,7 +42,7 @@ import org.apache.lucene.analysis.hunspell.Dictionary;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
@@ -441,7 +441,7 @@ public class AnalysisModuleTests extends OpenSearchTestCase {
         InputStream aff = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.aff");
         InputStream dic = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.dic");
         Dictionary dictionary;
-        try (Directory tmp = new SimpleFSDirectory(environment.tmpFile())) {
+        try (Directory tmp = new NIOFSDirectory(environment.tmpFile())) {
             dictionary = new Dictionary(tmp, "hunspell", aff, dic);
         }
         AnalysisModule module = new AnalysisModule(environment, singletonList(new AnalysisPlugin() {
@@ -107,14 +107,14 @@ public class PluginsServiceTests extends OpenSearchTestCase {
         Settings settings = Settings.builder()
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
             .put("my.setting", "test")
-            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.SIMPLEFS.getSettingsKey()).build();
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.NIOFS.getSettingsKey()).build();
         PluginsService service = newPluginsService(settings, AdditionalSettingsPlugin1.class);
         Settings newSettings = service.updatedSettings();
         assertEquals("test", newSettings.get("my.setting")); // previous settings still exist
         assertEquals("1", newSettings.get("foo.bar")); // added setting exists
         // does not override pre existing settings
         assertEquals(
-            IndexModule.Type.SIMPLEFS.getSettingsKey(),
+            IndexModule.Type.NIOFS.getSettingsKey(),
             newSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())
         );
     }
@@ -146,7 +146,7 @@ public class PluginsServiceTests extends OpenSearchTestCase {
         Settings settings = Settings.builder()
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
             .put("my.setting", "test")
-            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.SIMPLEFS.getSettingsKey()).build();
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.NIOFS.getSettingsKey()).build();
         PluginsService service = newPluginsService(settings, AdditionalSettingsPlugin1.class, FilterablePlugin.class);
         List<ScriptPlugin> scriptPlugins = service.filterPlugins(ScriptPlugin.class);
         assertEquals(1, scriptPlugins.size());
@@ -771,7 +771,7 @@ public class PluginsServiceTests extends OpenSearchTestCase {
         TestPlugin testPlugin = new TestPlugin();
         PluginsService.loadExtensions(Arrays.asList(
             Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin),
-            Tuple.tuple(new PluginInfo("test", null, null, null, null, null, null, Collections.singletonList("extensible"), false),
+            Tuple.tuple(new PluginInfo("test", null, null, null, null, null, null, Collections.singletonList("extensible"), false),
                 testPlugin)
         ));
