Core: upgrade to current Lucene 5.0.0 snapshot
Elasticsearch no longer forcefully unlocks the Lucene index on startup (this was dangerous and could lead to index corruption). Added the new serbian_normalization TokenFilter from Lucene. NoLockFactory is no longer supported (index.store.fs.fs_lock = none), and a typo in the fs_lock setting now fails with a StoreException instead of silently falling back to NoLockFactory. Closes #8588
parent 866571f4d7
commit dfb6d6081c
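For background, the Lucene 5.0 locking API that this commit adopts no longer installs a lock factory on a directory after the fact: a LockFactory singleton is passed to FSDirectory.open and locks are obtained from the Directory itself, which is why NodeEnvironment and FsDirectoryService are rewritten below. A minimal, self-contained sketch of that pattern (the class name and data path here are illustrative, not part of this commit):

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.NativeFSLockFactory;

public class NodeLockSketch {
    public static void main(String[] args) throws IOException {
        Path dataDir = Paths.get("data/node0"); // illustrative data path
        // The lock factory is a stateless singleton handed to the directory at creation time.
        try (Directory dir = FSDirectory.open(dataDir, NativeFSLockFactory.INSTANCE)) {
            Lock lock = dir.makeLock("node.lock");
            if (lock.obtain()) {
                try {
                    // ... this process owns the data directory ...
                } finally {
                    lock.close(); // release our own lock; nothing force-unlocks another process's index anymore
                }
            } else {
                throw new IOException("failed to obtain lock on " + dataDir);
            }
        }
    }
}

The point of the change is that a missing or misspelled lock setting can no longer silently degrade to NoLockFactory; the only supported fs_lock values are native and simple, as the FsDirectoryService hunk below enforces.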
@@ -34,3 +34,7 @@ Scandinavian::
 http://lucene.apache.org/core/4_9_0/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html[`scandinavian_normalization`],
 http://lucene.apache.org/core/4_9_0/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html[`scandinavian_folding`]
 
+Serbian::
+
+not-released-yet[`serbian_normalization`],
+
pom.xml
@@ -32,7 +32,7 @@
 
 <properties>
 <lucene.version>5.0.0</lucene.version>
-<lucene.maven.version>5.0.0-snapshot-1637347</lucene.maven.version>
+<lucene.maven.version>5.0.0-snapshot-1641343</lucene.maven.version>
 <tests.jvms>auto</tests.jvms>
 <tests.shuffle>true</tests.shuffle>
 <tests.output>onerror</tests.output>
@@ -54,7 +54,7 @@
 </repository>
 <repository>
 <id>Lucene snapshots</id>
-<url>https://download.elasticsearch.org/lucenesnapshots/1637347</url>
+<url>https://download.elasticsearch.org/lucenesnapshots/1641343</url>
 </repository>
 </repositories>
 
@@ -22,6 +22,8 @@ package org.elasticsearch.env;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import com.google.common.primitives.Ints;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.NativeFSLockFactory;
@@ -82,10 +84,10 @@ public class NodeEnvironment extends AbstractComponent implements Closeable{
 if (Files.exists(dir) == false) {
 Files.createDirectories(dir);
 }
-logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
-try {
-NativeFSLockFactory lockFactory = new NativeFSLockFactory(dir);
-Lock tmpLock = lockFactory.makeLock("node.lock");
+
+try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
+logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
+Lock tmpLock = luceneDir.makeLock("node.lock");
 boolean obtained = tmpLock.obtain();
 if (obtained) {
 locks[dirIndex] = tmpLock;
@@ -511,6 +511,7 @@ public class AnalysisModule extends AbstractModule {
 tokenFiltersBindings.processTokenFilter("persian_normalization", PersianNormalizationFilterFactory.class);
 tokenFiltersBindings.processTokenFilter("scandinavian_normalization", ScandinavianNormalizationFilterFactory.class);
 tokenFiltersBindings.processTokenFilter("scandinavian_folding", ScandinavianFoldingFilterFactory.class);
+tokenFiltersBindings.processTokenFilter("serbian_normalization", SerbianNormalizationFilterFactory.class);
 
 tokenFiltersBindings.processTokenFilter("hunspell", HunspellTokenFilterFactory.class);
 tokenFiltersBindings.processTokenFilter("cjk_bigram", CJKBigramFilterFactory.class);
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.sr.SerbianNormalizationFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+/**
+ *
+ */
+public class SerbianNormalizationFilterFactory extends AbstractTokenFilterFactory {
+
+@Inject
+public SerbianNormalizationFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+super(index, indexSettings, name, settings);
+}
+
+@Override
+public TokenStream create(TokenStream tokenStream) {
+return new SerbianNormalizationFilter(tokenStream);
+}
+}
@@ -1434,11 +1434,6 @@ public class InternalEngine extends AbstractIndexShardComponent implements Engin
 
 private IndexWriter createWriter() throws IOException {
 try {
-// release locks when started
-if (IndexWriter.isLocked(store.directory())) {
-logger.warn("shard is locked, releasing lock");
-IndexWriter.unlock(store.directory());
-}
 boolean create = !Lucene.indexExists(store.directory());
 IndexWriterConfig config = new IndexWriterConfig(analysisService.defaultIndexAnalyzer());
 config.setCommitOnClose(false); // we by default don't commit on close
@@ -264,7 +264,7 @@ public class BinaryFieldMapper extends AbstractFieldMapper<BytesReference> {
 
 public static final FieldType TYPE = new FieldType();
 static {
-TYPE.setDocValueType(DocValuesType.BINARY);
+TYPE.setDocValuesType(DocValuesType.BINARY);
 TYPE.freeze();
 }
 
@@ -404,7 +404,7 @@ public class DoubleFieldMapper extends NumberFieldMapper<Double> {
 
 public static final FieldType TYPE = new FieldType();
 static {
-TYPE.setDocValueType(DocValuesType.BINARY);
+TYPE.setDocValuesType(DocValuesType.BINARY);
 TYPE.freeze();
 }
 
@@ -409,7 +409,7 @@ public class FloatFieldMapper extends NumberFieldMapper<Float> {
 
 public static final FieldType TYPE = new FieldType();
 static {
-TYPE.setDocValueType(DocValuesType.BINARY);
+TYPE.setDocValuesType(DocValuesType.BINARY);
 TYPE.freeze();
 }
 
@@ -433,7 +433,7 @@ public abstract class NumberFieldMapper<T extends Number> extends AbstractFieldM
 
 public static final FieldType TYPE = new FieldType();
 static {
-TYPE.setDocValueType(DocValuesType.BINARY);
+TYPE.setDocValuesType(DocValuesType.BINARY);
 TYPE.freeze();
 }
 
@@ -484,7 +484,7 @@ public abstract class NumberFieldMapper<T extends Number> extends AbstractFieldM
 
 public static final FieldType TYPE = new FieldType();
 static {
-TYPE.setDocValueType(DocValuesType.BINARY);
+TYPE.setDocValuesType(DocValuesType.BINARY);
 TYPE.freeze();
 }
 
@@ -728,7 +728,7 @@ public class GeoPointFieldMapper extends AbstractFieldMapper<GeoPoint> implement
 
 public static final FieldType TYPE = new FieldType();
 static {
-TYPE.setDocValueType(DocValuesType.BINARY);
+TYPE.setDocValuesType(DocValuesType.BINARY);
 TYPE.freeze();
 }
 
@@ -151,6 +151,11 @@ public class ConcurrentMergeSchedulerProvider extends MergeSchedulerProvider {
 super.afterMerge(merge);
 provider.afterMerge(merge);
 }
+
+@Override
+protected void maybeStall() {
+// Don't stall here, because we do our own index throttling (in InternalEngine.IndexThrottle) when merges can't keep up
+}
 }
 
 class ApplySettings implements IndexSettingsService.Listener {
@@ -33,7 +33,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 * A directory implementation that uses the Elasticsearch {@link Distributor} abstraction to distribute
 * files across multiple data directories.
 */
-public final class DistributorDirectory extends BaseDirectory {
+public final class DistributorDirectory extends Directory {
 
 private final Distributor distributor;
 private final HashMap<String, Directory> nameDirMapping = new HashMap<>();
@@ -74,7 +74,6 @@ public final class DistributorDirectory extends BaseDirectory {
 nameDirMapping.put(file, dir);
 }
 }
-lockFactory = new DistributorLockFactoryWrapper(distributor.primary());
 }
 
 @Override
@@ -132,7 +131,6 @@ public final class DistributorDirectory extends BaseDirectory {
 } finally {
 IOUtils.close(distributor.all());
 }
-
 }
 
 /**
@@ -140,7 +138,7 @@ public final class DistributorDirectory extends BaseDirectory {
 *
 * @throws IOException if the name has not yet been associated with any directory ie. fi the file does not exists
 */
-Directory getDirectory(String name) throws IOException { // pkg private for testing
+synchronized Directory getDirectory(String name) throws IOException { // pkg private for testing
 return getDirectory(name, true);
 }
 
@@ -148,7 +146,7 @@ public final class DistributorDirectory extends BaseDirectory {
 * Returns the directory that has previously been associated with this file name or associates the name with a directory
 * if failIfNotAssociated is set to false.
 */
-private Directory getDirectory(String name, boolean failIfNotAssociated) throws IOException {
+private synchronized Directory getDirectory(String name, boolean failIfNotAssociated) throws IOException {
 final Directory directory = nameDirMapping.get(name);
 if (directory == null) {
 if (failIfNotAssociated) {
@@ -164,17 +162,6 @@ public final class DistributorDirectory extends BaseDirectory {
 return directory;
 }
 
-@Override
-public synchronized void setLockFactory(LockFactory lockFactory) throws IOException {
-distributor.primary().setLockFactory(lockFactory);
-super.setLockFactory(new DistributorLockFactoryWrapper(distributor.primary()));
-}
-
-@Override
-public synchronized String getLockID() {
-return distributor.primary().getLockID();
-}
-
 @Override
 public synchronized String toString() {
 return distributor.toString();
@@ -201,8 +188,8 @@ public final class DistributorDirectory extends BaseDirectory {
 .append(System.lineSeparator());
 } else if (directory != d) {
 consistent = false;
-builder.append("File ").append(file).append(" was mapped to a directory ").append(directory)
-.append(" but exists in another distributor directory").append(d)
+builder.append("File ").append(file).append(" was mapped to a directory ").append(directory)
+.append(" but exists in another distributor directory ").append(d)
 .append(System.lineSeparator());
 }
 
@@ -212,86 +199,41 @@ public final class DistributorDirectory extends BaseDirectory {
 return consistent; // return boolean so it can be easily be used in asserts
 }
 
-/**
- * This inner class is a simple wrapper around the original
- * lock factory to track files written / created through the
- * lock factory. For instance {@link NativeFSLockFactory} creates real
- * files that we should expose for consistency reasons.
- */
-private class DistributorLockFactoryWrapper extends LockFactory {
-private final Directory dir;
-private final LockFactory delegate;
-private final boolean writesFiles;
-
-public DistributorLockFactoryWrapper(Directory dir) {
-this.dir = dir;
-final FSDirectory leaf = DirectoryUtils.getLeaf(dir, FSDirectory.class);
-if (leaf != null) {
-writesFiles = leaf.getLockFactory() instanceof FSLockFactory;
-} else {
-writesFiles = false;
-}
-this.delegate = dir.getLockFactory();
-}
-
-@Override
-public void setLockPrefix(String lockPrefix) {
-delegate.setLockPrefix(lockPrefix);
-}
-
-@Override
-public String getLockPrefix() {
-return delegate.getLockPrefix();
-}
-
-@Override
-public Lock makeLock(String lockName) {
-return new DistributorLock(delegate.makeLock(lockName), lockName);
-}
-
-@Override
-public void clearLock(String lockName) throws IOException {
-delegate.clearLock(lockName);
-}
-
-@Override
-public String toString() {
-return "DistributorLockFactoryWrapper(" + delegate.toString() + ")";
-}
-
-private class DistributorLock extends Lock {
-private final Lock delegateLock;
-private final String name;
-
-DistributorLock(Lock delegate, String name) {
-this.delegateLock = delegate;
-this.name = name;
-}
-
-@Override
-public boolean obtain() throws IOException {
-if (delegateLock.obtain()) {
-if (writesFiles) {
-synchronized (DistributorDirectory.this) {
-assert (nameDirMapping.containsKey(name) == false || nameDirMapping.get(name) == dir);
-if (nameDirMapping.get(name) == null) {
-nameDirMapping.put(name, dir);
+@Override
+public Lock makeLock(final String lockName) {
+final Directory primary = distributor.primary();
+final Lock delegateLock = primary.makeLock(lockName);
+if (DirectoryUtils.getLeaf(primary, FSDirectory.class) != null) {
+// Wrap the delegate's lock just so we can monitor when it actually wrote a lock file. We assume that an FSDirectory writes its
+// locks as actual files (we don't support NoLockFactory):
+return new Lock() {
+@Override
+public boolean obtain() throws IOException {
+if (delegateLock.obtain()) {
+synchronized(DistributorDirectory.this) {
+assert nameDirMapping.containsKey(lockName) == false || nameDirMapping.get(lockName) == primary;
+if (nameDirMapping.get(lockName) == null) {
+nameDirMapping.put(lockName, primary);
 }
 }
+return true;
+} else {
+return false;
 }
-return true;
-} else {
-return false;
-}
 }
-}
 
-@Override
-public void close() throws IOException { delegateLock.close(); }
+@Override
+public void close() throws IOException {
+delegateLock.close();
+}
 
-@Override
-public boolean isLocked() throws IOException {
-return delegateLock.isLocked();
-}
+@Override
+public boolean isLocked() throws IOException {
+return delegateLock.isLocked();
+}
+};
+} else {
+return delegateLock;
+}
 }
 }
 }
@@ -19,6 +19,11 @@
 
 package org.elasticsearch.index.store.fs;
 
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
 import org.apache.lucene.store.*;
 import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.metrics.CounterMetric;
@@ -27,11 +32,7 @@ import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.DirectoryService;
 import org.elasticsearch.index.store.IndexStore;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
+import org.elasticsearch.index.store.StoreException;
 
 /**
 */
@@ -58,16 +59,17 @@ public abstract class FsDirectoryService extends DirectoryService implements Sto
 
 protected final LockFactory buildLockFactory() throws IOException {
 String fsLock = componentSettings.get("lock", componentSettings.get("fs_lock", "native"));
-LockFactory lockFactory = NoLockFactory.getNoLockFactory();
+LockFactory lockFactory;
 if (fsLock.equals("native")) {
-lockFactory = new NativeFSLockFactory();
+lockFactory = NativeFSLockFactory.INSTANCE;
 } else if (fsLock.equals("simple")) {
-lockFactory = new SimpleFSLockFactory();
-} else if (fsLock.equals("none")) {
-lockFactory = NoLockFactory.getNoLockFactory();
+lockFactory = SimpleFSLockFactory.INSTANCE;
+} else {
+throw new StoreException(shardId, "unrecognized fs_lock \"" + fsLock + "\": must be native or simple");
 }
 return lockFactory;
 }
 
+
 @Override
 public Directory[] build() throws IOException {
@@ -632,9 +632,9 @@ public class GeolocationContextMapping extends ContextMapping {
 for (int i = 0 ; i < lonFields.length ; i++) {
 IndexableField lonField = lonFields[i];
 IndexableField latField = latFields[i];
-assert lonField.fieldType().docValueType() == latField.fieldType().docValueType();
+assert lonField.fieldType().docValuesType() == latField.fieldType().docValuesType();
 // we write doc values fields differently: one field for all values, so we need to only care about indexed fields
-if (lonField.fieldType().docValueType() == DocValuesType.NONE) {
+if (lonField.fieldType().docValuesType() == DocValuesType.NONE) {
 spare.reset(latField.numericValue().doubleValue(), lonField.numericValue().doubleValue());
 geohashes.add(spare.geohash());
 }
@@ -134,6 +134,7 @@ public class AnalysisFactoryTests extends ElasticsearchTestCase {
 put("russianlightstem", StemmerTokenFilterFactory.class);
 put("scandinavianfolding", ScandinavianFoldingFilterFactory.class);
 put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class);
+put("serbiannormalization", SerbianNormalizationFilterFactory.class);
 put("shingle", ShingleTokenFilterFactory.class);
 put("snowballporter", SnowballTokenFilterFactory.class);
 put("soraninormalization", SoraniNormalizationFilterFactory.class);
@@ -348,6 +348,9 @@ public class InternalEngineTests extends ElasticsearchTestCase {
 }
 
 public void testStartAndAcquireConcurrently() {
+// Close engine from setUp (we create our own):
+engine.close();
+
 ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, new IndexSettingsService(shardId.index(), EMPTY_SETTINGS));
 final Engine engine = createEngine(engineSettingsService, store, createTranslog(), mergeSchedulerProvider);
 final AtomicBoolean startPending = new AtomicBoolean(true);
@@ -377,6 +380,9 @@ public class InternalEngineTests extends ElasticsearchTestCase {
 
 @Test
 public void testSegmentsWithMergeFlag() throws Exception {
+// Close engine from setUp (we create our own):
+engine.close();
+
 ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, new IndexSettingsService(shardId.index(), EMPTY_SETTINGS));
 final AtomicReference<CountDownLatch> waitTillMerge = new AtomicReference<>();
 final AtomicReference<CountDownLatch> waitForMerge = new AtomicReference<>();
@@ -1307,6 +1313,9 @@ public class InternalEngineTests extends ElasticsearchTestCase {
 @Test
 public void testEnableGcDeletes() throws Exception {
 
+// Close engine from setUp (we create our own):
+engine.close();
+
 // Make sure enableGCDeletes == false works:
 Settings settings = ImmutableSettings.builder()
 .put(InternalEngine.INDEX_GC_DELETES, "0ms")
@@ -107,7 +107,7 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest {
 assertEquals(ft1.omitNorms(), ft2.omitNorms());
 assertEquals(ft1.indexOptions(), ft2.indexOptions());
 assertEquals(ft1.storeTermVectors(), ft2.storeTermVectors());
-assertEquals(ft1.docValueType(), ft2.docValueType());
+assertEquals(ft1.docValuesType(), ft2.docValuesType());
 }
 
 private void assertParseIdemPotent(IndexableFieldType expected, DocumentMapper mapper) throws Exception {
@@ -327,8 +327,8 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest {
 
 public static DocValuesType docValuesType(Document document, String fieldName) {
 for (IndexableField field : document.getFields(fieldName)) {
-if (field.fieldType().docValueType() != DocValuesType.NONE) {
-return field.fieldType().docValueType();
+if (field.fieldType().docValuesType() != DocValuesType.NONE) {
+return field.fieldType().docValuesType();
 }
 }
 return DocValuesType.NONE;
@@ -164,7 +164,7 @@ public class DistributorTests extends ElasticsearchTestCase {
 
 
 public FakeFsDirectory(String path, long usableSpace) throws IOException {
-super(Paths.get(path), NoLockFactory.getNoLockFactory());
+super(Paths.get(path), NoLockFactory.INSTANCE);
 allocationCount = 0;
 this.useableSpace = usableSpace;
 }