LUCENE-6171: pass StandardOpenOption.CREATE_NEW so the filesystem ensures Lucene really is write-once

Mike McCandless 2016-06-14 15:15:57 -04:00
parent 5eabffc797
commit a893c64595
28 changed files with 11 additions and 176 deletions

View File

@ -42,6 +42,11 @@ Improvements
IndexReader after (illegally) removing the old index and
reindexing (Vitaly Funstein, Robert Muir, Mike McCandless)
* LUCENE-6171: Lucene now passes the StandardOpenOption.CREATE_NEW
option when writing new files so the filesystem enforces our
write-once architecture, possibly catching externally caused
issues sooner (Robert Muir, Mike McCandless)
Optimizations
* LUCENE-7330: Speed up conjunction queries. (Adrien Grand)
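A minimal, self-contained sketch (illustrative class name, not code from this commit) of the JDK behavior the LUCENE-6171 entry above relies on: opening a file with CREATE_NEW fails if the file already exists, so a second write to the same name is rejected by the filesystem itself.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class CreateNewDemo {
  public static void main(String[] args) throws IOException {
    Path path = Files.createTempDirectory("createNewDemo").resolve("_0.si");

    // First open succeeds: CREATE_NEW atomically creates the file and
    // fails if it already exists.
    try (FileChannel ch = FileChannel.open(path,
        StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
      ch.write(ByteBuffer.wrap(new byte[] {1, 2, 3}));
    }

    // A second open of the same name is rejected by the filesystem: this is
    // the write-once guarantee the entry above refers to.
    try {
      FileChannel.open(path,
          StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW).close();
      throw new AssertionError("expected FileAlreadyExistsException");
    } catch (FileAlreadyExistsException expected) {
      System.out.println("second create rejected: " + expected.getMessage());
    }
  }
}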

View File

@ -264,7 +264,7 @@ public abstract class FSDirectory extends BaseDirectory {
continue;
}
return new FSIndexOutput(name,
StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
} catch (FileAlreadyExistsException faee) {
// Retry with next incremented name
}
@ -401,7 +401,7 @@ public abstract class FSDirectory extends BaseDirectory {
static final int CHUNK_SIZE = 8192;
public FSIndexOutput(String name) throws IOException {
this(name, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE);
this(name, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
}
FSIndexOutput(String name, OpenOption... options) throws IOException {
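The first hunk above keeps the retry-on-FileAlreadyExistsException loop when creating temp outputs. A standalone sketch of that pattern follows; TempOutputSketch, its fields, and the naming scheme are assumptions for illustration, not FSDirectory's actual internals.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the retry loop in the first hunk: keep incrementing a counter
// until CREATE_NEW succeeds, letting the filesystem arbitrate name
// collisions instead of checking for existence up front.
class TempOutputSketch {
  private final Path dir;
  private final AtomicLong counter = new AtomicLong();

  TempOutputSketch(Path dir) {
    this.dir = dir;
  }

  FileChannel createTempOutput(String prefix, String suffix) throws IOException {
    while (true) {
      // Hypothetical naming scheme, not FSDirectory's actual one.
      Path path = dir.resolve(prefix + "_" + counter.getAndIncrement() + "." + suffix);
      try {
        // WRITE + CREATE_NEW: create the file only if it does not exist yet.
        return FileChannel.open(path,
            StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
      } catch (FileAlreadyExistsException faee) {
        // Collision: fall through and retry with the next counter value.
      }
    }
  }
}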

View File

@ -28,7 +28,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
import org.apache.lucene.util.LuceneTestCase;
@ -88,11 +87,6 @@ public class TestAllFilesCheckIndexHeader extends LuceneTestCase {
try (BaseDirectoryWrapper dirCopy = newDirectory()) {
dirCopy.setCheckIndexOnClose(false);
if (dirCopy instanceof MockDirectoryWrapper) {
// The while(true) loop below, under rarish circumstances, can sometimes double write:
((MockDirectoryWrapper) dirCopy).setPreventDoubleWrite(false);
}
long victimLength = dir.fileLength(victim);
int wrongBytes = TestUtil.nextInt(random(), 1, (int) Math.min(100, victimLength));
assert victimLength > 0;

View File

@ -213,9 +213,6 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
public void testNoWaitClose() throws IOException {
Directory directory = newDirectory();
if (directory instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) directory).setPreventDoubleWrite(false);
}
Document doc = new Document();
Field idField = newStringField("id", "", Field.Store.YES);
doc.add(idField);

View File

@ -104,7 +104,6 @@ public class TestCrash extends LuceneTestCase {
// running / store files could be open when we crash:
dir.setAssertNoUnrefencedFilesOnClose(false);
dir.setPreventDoubleWrite(false);
if (VERBOSE) {
System.out.println("TEST: now crash");
}

View File

@ -46,9 +46,6 @@ public class TestIndexFileDeleter extends LuceneTestCase {
public void testDeleteLeftoverFiles() throws IOException {
Directory dir = newDirectory();
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
}
MergePolicy mergePolicy = newLogMergePolicy(true, 10);

View File

@ -939,10 +939,6 @@ public class TestIndexWriter extends LuceneTestCase {
// LUCENE-2239: won't work with NIOFS/MMAP
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
// When interrupt arrives in w.close(), this can
// lead to double-write of files:
dir.setPreventDoubleWrite(false);
// open/close slowly sometimes
dir.setUseSlowOpenClosers(true);

View File

@ -139,12 +139,6 @@ public class TestIndexWriterCommit extends LuceneTestCase {
.setOpenMode(OpenMode.APPEND)
.setMaxBufferedDocs(10));
// On abort, writer in fact may write to the same
// segments_N file:
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
}
for(int i=0;i<12;i++) {
for(int j=0;j<17;j++) {
TestIndexWriter.addDoc(writer);
@ -263,12 +257,6 @@ public class TestIndexWriterCommit extends LuceneTestCase {
*/
public void testCommitOnCloseForceMerge() throws IOException {
Directory dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
// writing to same file more than once
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
}
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(new MockAnalyzer(random()))
@ -555,14 +543,6 @@ public class TestIndexWriterCommit extends LuceneTestCase {
public void testPrepareCommitRollback() throws IOException {
Directory dir = newDirectory();
MockDirectoryWrapper mockDir;
if (dir instanceof MockDirectoryWrapper) {
mockDir = (MockDirectoryWrapper) dir;
mockDir.setPreventDoubleWrite(false);
} else {
mockDir = null;
}
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(new MockAnalyzer(random()))

View File

@ -522,7 +522,6 @@ public class TestIndexWriterDelete extends LuceneTestCase {
System.out.println("TEST: cycle");
}
MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(startDir));
dir.setPreventDoubleWrite(false);
dir.setAllowRandomFileNotFoundException(false);
IndexWriter modifier = new IndexWriter(dir,
newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))

View File

@ -65,7 +65,6 @@ public class TestIndexWriterExceptions2 extends LuceneTestCase {
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
((MockDirectoryWrapper)dir).setUseSlowOpenClosers(false);
((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
}
// log all exceptions we hit, in case we fail (for debugging)

View File

@ -25,7 +25,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
@ -158,10 +157,6 @@ public class TestIndexWriterFromReader extends LuceneTestCase {
public void testRandom() throws Exception {
Directory dir = newDirectory();
if (dir instanceof MockDirectoryWrapper) {
// Since we rollback writer we can easily try to write to the same filenames:
((MockDirectoryWrapper) dir).setPreventDoubleWrite(false);
}
int numOps = atLeast(100);
@ -293,10 +288,6 @@ public class TestIndexWriterFromReader extends LuceneTestCase {
public void testConsistentFieldNumbers() throws Exception {
Directory dir = newDirectory();
if (dir instanceof MockDirectoryWrapper) {
// Since we use IW.rollback and then open another, the 2nd IW can easily write to the same segment name:
((MockDirectoryWrapper) dir).setPreventDoubleWrite(false);
}
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
// Empty first commit:
w.commit();

View File

@ -28,7 +28,6 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
public class TestIndexWriterMerging extends LuceneTestCase {
@ -360,10 +359,6 @@ public class TestIndexWriterMerging extends LuceneTestCase {
public void testNoWaitClose() throws Throwable {
Directory directory = newDirectory();
if (directory instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) directory).setPreventDoubleWrite(false);
}
final Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
customType.setTokenized(false);

View File

@ -253,7 +253,6 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
// Make a new dir that will enforce disk usage:
MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(startDir));
dir.setPreventDoubleWrite(false);
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
.setOpenMode(OpenMode.APPEND)
.setMergePolicy(newLogMergePolicy(false));

View File

@ -35,7 +35,6 @@ import org.apache.lucene.util.TestUtil;
public class TestIndexWriterOutOfFileDescriptors extends LuceneTestCase {
public void test() throws Exception {
MockDirectoryWrapper dir = newMockFSDirectory(createTempDir("TestIndexWriterOutOfFileDescriptors"));
dir.setPreventDoubleWrite(false);
double rate = random().nextDouble()*0.01;
//System.out.println("rate=" + rate);
dir.setRandomIOExceptionRateOnOpen(rate);

View File

@ -578,9 +578,6 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// LUCENE-4147
public void testRollbackAndCommitWithThreads() throws Exception {
final BaseDirectoryWrapper d = newDirectory();
if (d instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)d).setPreventDoubleWrite(false);
}
final int threadCount = TestUtil.nextInt(random(), 2, 6);

View File

@ -40,9 +40,6 @@ public class TestNRTReaderCleanup extends LuceneTestCase {
MockDirectoryWrapper dir = newMockDirectory();
// Allow writing to same file more than once:
dir.setPreventDoubleWrite(false);
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
LogMergePolicy lmp = new LogDocMergePolicy();
lmp.setMergeFactor(2);

View File

@ -233,8 +233,6 @@ public class TestTransactions extends LuceneTestCase {
// we cant use non-ramdir on windows, because this test needs to double-write.
MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random(), new RAMDirectory());
MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random(), new RAMDirectory());
dir1.setPreventDoubleWrite(false);
dir2.setPreventDoubleWrite(false);
dir1.failOn(new RandomFailure());
dir2.failOn(new RandomFailure());
dir1.setFailOnOpenInput(false);

View File

@ -34,7 +34,6 @@ import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.OfflineSorter.BufferSize;
import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
import org.apache.lucene.util.OfflineSorter.SortInfo;
@ -260,9 +259,6 @@ public class TestOfflineSorter extends LuceneTestCase {
public void testBitFlippedOnInput1() throws Exception {
try (Directory dir0 = newMockDirectory()) {
if (dir0 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir0).setPreventDoubleWrite(false);
}
Directory dir = new FilterDirectory(dir0) {
@Override
@ -290,9 +286,6 @@ public class TestOfflineSorter extends LuceneTestCase {
public void testBitFlippedOnInput2() throws Exception {
try (Directory dir0 = newMockDirectory()) {
if (dir0 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir0).setPreventDoubleWrite(false);
}
Directory dir = new FilterDirectory(dir0) {
@Override
@ -343,9 +336,6 @@ public class TestOfflineSorter extends LuceneTestCase {
public void testBitFlippedOnPartition1() throws Exception {
try (Directory dir0 = newMockDirectory()) {
if (dir0 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir0).setPreventDoubleWrite(false);
}
Directory dir = new FilterDirectory(dir0) {
@ -377,9 +367,6 @@ public class TestOfflineSorter extends LuceneTestCase {
public void testBitFlippedOnPartition2() throws Exception {
try (Directory dir0 = newMockDirectory()) {
if (dir0 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir0).setPreventDoubleWrite(false);
}
Directory dir = new FilterDirectory(dir0) {

View File

@ -808,9 +808,6 @@ public class TestBKD extends LuceneTestCase {
}
try (Directory dir0 = newMockDirectory()) {
if (dir0 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir0).setPreventDoubleWrite(false);
}
Directory dir = new FilterDirectory(dir0) {
boolean corrupted;
@ -857,9 +854,6 @@ public class TestBKD extends LuceneTestCase {
}
try (Directory dir0 = newMockDirectory()) {
if (dir0 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir0).setPreventDoubleWrite(false);
}
Directory dir = new FilterDirectory(dir0) {
boolean corrupted;

View File

@ -92,7 +92,6 @@ public class TestFSTs extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
dir = newMockDirectory();
dir.setPreventDoubleWrite(false);
}
@Override

View File

@ -43,7 +43,6 @@ public class TestFSTsMisc extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
dir = newMockDirectory();
dir.setPreventDoubleWrite(false);
}
@Override

View File

@ -302,18 +302,6 @@ public class IndexAndTaxonomyReplicationClientTest extends ReplicatorTestCase {
client.close();
callback.close();
// Replicator violates write-once policy. It may be that the
// handler copies files to the index dir, then fails to copy a
// file and reverts the copy operation. On the next attempt, it
// will copy the same file again. There is nothing wrong with this
// in a real system, but it does violate write-once, and MDW
// doesn't like it. Disabling it means that we won't catch cases
// where the handler overwrites an existing index file, but
// there's nothing currently we can do about it, unless we don't
// use MDW.
handlerIndexDir.setPreventDoubleWrite(false);
handlerTaxoDir.setPreventDoubleWrite(false);
// wrap sourceDirFactory to return a MockDirWrapper so we can simulate errors
final SourceDirectoryFactory in = sourceDirFactory;
final AtomicInteger failures = new AtomicInteger(atLeast(10));

View File

@ -219,17 +219,6 @@ public class IndexReplicationClientTest extends ReplicatorTestCase {
client.close();
callback.close();
// Replicator violates write-once policy. It may be that the
// handler copies files to the index dir, then fails to copy a
// file and reverts the copy operation. On the next attempt, it
// will copy the same file again. There is nothing wrong with this
// in a real system, but it does violate write-once, and MDW
// doesn't like it. Disabling it means that we won't catch cases
// where the handler overwrites an existing index file, but
// there's nothing currently we can do about it, unless we don't
// use MDW.
handlerDir.setPreventDoubleWrite(false);
// wrap sourceDirFactory to return a MockDirWrapper so we can simulate errors
final SourceDirectoryFactory in = sourceDirFactory;
final AtomicInteger failures = new AtomicInteger(atLeast(10));

View File

@ -393,7 +393,6 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
MockDirectoryWrapper dir = newMockDirectory();
dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
dir.setUseSlowOpenClosers(false);
dir.setPreventDoubleWrite(false);
dir.setRandomIOExceptionRate(0.001); // more rare
// log all exceptions we hit, in case we fail (for debugging)

View File

@ -123,42 +123,6 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
dir.close();
}
// TODO: are these semantics really needed by lucene? can we just throw exception?
public void testCopyOverwrite() throws Exception {
Directory source = getDirectory(createTempDir("testCopyOverwrite"));
Directory dest = newDirectory();
// we are double-writing intentionally, because thats the api
if (dest instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dest).setPreventDoubleWrite(false);
}
IndexOutput output = source.createOutput("foobar", newIOContext(random()));
int numBytes = random().nextInt(20000);
byte bytes[] = new byte[numBytes];
random().nextBytes(bytes);
output.writeBytes(bytes, bytes.length);
output.close();
// create foobaz first, it should be overwritten
IndexOutput output2 = dest.createOutput("foobaz", newIOContext(random()));
output2.writeString("bogus!");
output2.close();
dest.copyFrom(source, "foobar", "foobaz", newIOContext(random()));
assertTrue(slowFileExists(dest, "foobaz"));
IndexInput input = dest.openInput("foobaz", newIOContext(random()));
byte bytes2[] = new byte[numBytes];
input.readBytes(bytes2, 0, bytes2.length);
assertEquals(input.length(), numBytes);
input.close();
assertArrayEquals(bytes, bytes2);
IOUtils.close(source, dest);
}
public void testDeleteFile() throws Exception {
Directory dir = getDirectory(createTempDir("testDeleteFile"));
int count = dir.listAll().length;
@ -1240,24 +1204,10 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
fsDir.deleteFile(fileName);
});
// Make sure we cannot open it for reading:
expectThrows(NoSuchFileException.class, () -> {
fsDir.openInput(fileName, IOContext.DEFAULT);
});
if (random().nextBoolean()) {
try (IndexOutput out = fsDir.createOutput(fileName + "z", IOContext.DEFAULT)) {
out.getFilePointer(); // just fake access to prevent compiler warning
}
// Make sure we can rename onto the deleted file:
fsDir.renameFile(fileName + "z", fileName);
} else {
// write the file again
try (IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT)) {
out.getFilePointer(); // just fake access to prevent compiler warning
}
}
assertEquals(0, fsDir.fileLength(fileName));
assertTrue(Arrays.asList(fsDir.listAll()).contains(fileName));
}
}

View File

@ -76,7 +76,6 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
double randomIOExceptionRateOnOpen;
Random randomState;
boolean assertNoDeleteOpenFile = false;
boolean preventDoubleWrite = true;
boolean trackDiskUsage = false;
boolean useSlowOpenClosers = LuceneTestCase.TEST_NIGHTLY;
boolean allowRandomFileNotFoundException = true;
@ -146,12 +145,6 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
trackDiskUsage = v;
}
/** If set to true, we throw an IOException if the same
* file is opened by createOutput, ever. */
public void setPreventDoubleWrite(boolean value) {
preventDoubleWrite = value;
}
/** If set to true (the default), when we throw random
* IOException on openInput or createOutput, we may
* sometimes throw FileNotFoundException or
@ -240,6 +233,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
unSyncedFiles.add(dest);
}
openFilesDeleted.remove(source);
createdFiles.remove(source);
createdFiles.add(dest);
}
}
@ -622,7 +616,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
}
init();
synchronized(this) {
if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen")) {
if (createdFiles.contains(name) && !name.equals("segments.gen")) {
throw new IOException("file \"" + name + "\" was already written to");
}
}
@ -639,7 +633,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
RAMFile existing = ramdir.fileMap.get(name);
// Enforce write once:
if (existing!=null && !name.equals("segments.gen") && preventDoubleWrite) {
if (existing!=null && !name.equals("segments.gen")) {
throw new IOException("file " + name + " already exists");
} else {
if (existing!=null) {
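With preventDoubleWrite removed, the check above is unconditional (segments.gen excepted). For illustration, a minimal sketch of the same write-once enforcement as a Directory wrapper; WriteOnceDirectory is not a real Lucene class, just an assumed name.

import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

// Remembers every name handed to createOutput and refuses a second write,
// mirroring the now-unconditional check in the hunk above.
class WriteOnceDirectory extends FilterDirectory {
  private final Set<String> createdFiles = Collections.synchronizedSet(new HashSet<>());

  WriteOnceDirectory(Directory in) {
    super(in);
  }

  @Override
  public IndexOutput createOutput(String name, IOContext context) throws IOException {
    if (!createdFiles.add(name)) {
      throw new IOException("file \"" + name + "\" was already written to");
    }
    return super.createOutput(name, context);
  }
}

Wrapping any Directory this way makes a second createOutput for the same name fail, which is what the filesystem itself now does for FSDirectory via CREATE_NEW.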

View File

@ -63,11 +63,6 @@ public class MockDirectoryFactory extends EphemeralDirectoryFactory {
// and check index fails
mockDirWrapper.setCheckIndexOnClose(false);
// if we enable this, TestReplicationHandler fails when it
// tries to write to index.properties after the file has
// already been created.
mockDirWrapper.setPreventDoubleWrite(false);
if (allowReadingFilesStillOpenForWrite) {
mockDirWrapper.setAllowReadingFilesStillOpenForWrite(true);
}

View File

@ -49,7 +49,6 @@ public class MockFSDirectoryFactory extends StandardDirectoryFactory {
if (cdir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)cdir).setAssertNoUnrefencedFilesOnClose(false);
((MockDirectoryWrapper)cdir).setPreventDoubleWrite(false);
}
return dir;
}