names) throws IOException;
-
+
+ /**
+ * Renames {@code source} to {@code dest} as an atomic operation,
+ * where {@code dest} does not yet exist in the directory.
+ *
+ * Notes: This method is used by IndexWriter to publish commits.
+ * It is ok if this operation is not truly atomic, for example
+ * both {@code source} and {@code dest} can be visible temporarily.
+ * It is just important that the contents of {@code dest} appear
+ * atomically, or an exception is thrown.
+ */
+ public abstract void renameFile(String source, String dest) throws IOException;
+
/** Returns a stream reading an existing file, with the
* specified read buffer size. The particular Directory
* implementation may ignore the buffer size. Currently
diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
index 7aad29d1e76..7c64686a003 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
@@ -27,6 +27,7 @@ import java.io.FilenameFilter;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
@@ -292,14 +293,17 @@ public abstract class FSDirectory extends BaseDirectory {
fsync(name);
}
- // fsync the directory itsself, but only if there was any file fsynced before
- // (otherwise it can happen that the directory does not yet exist)!
- if (!toSync.isEmpty()) {
- IOUtils.fsync(directory, true);
- }
-
staleFiles.removeAll(toSync);
}
+
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ ensureOpen();
+ Files.move(new File(directory, source).toPath(), new File(directory, dest).toPath(), StandardCopyOption.ATOMIC_MOVE);
+ // TODO: should we move directory fsync to a separate 'syncMetadata' method?
+ // for example, to improve listCommits(), IndexFileDeleter could also call that after deleting segments_Ns
+ IOUtils.fsync(directory, true);
+ }
@Override
public String getLockID() {
diff --git a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java
index d4e5662a738..9b13f65076e 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java
@@ -18,6 +18,7 @@ package org.apache.lucene.store;
*/
import java.io.IOException;
+import java.nio.file.AtomicMoveNotSupportedException;
import java.util.ArrayList;
import java.util.Collection;
@@ -162,6 +163,17 @@ public class FileSwitchDirectory extends BaseDirectory {
secondaryDir.sync(secondaryNames);
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ Directory sourceDir = getDirectory(source);
+ // won't happen with standard lucene index files since pending and commit will
+ // always have the same extension ("")
+ if (sourceDir != getDirectory(dest)) {
+ throw new AtomicMoveNotSupportedException(source, dest, "source and dest are in different directories");
+ }
+ sourceDir.renameFile(source, dest);
+ }
+
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
return getDirectory(name).openInput(name, context);
diff --git a/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java
index f3bb5a06158..ddaa529337d 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FilterDirectory.java
@@ -69,6 +69,11 @@ public class FilterDirectory extends Directory {
in.sync(names);
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ in.renameFile(source, dest);
+ }
+
@Override
public IndexInput openInput(String name, IOContext context)
throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java
index 96883254214..351445686a8 100644
--- a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java
@@ -178,6 +178,14 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable
in.sync(fileNames);
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ // NOTE: uncache is unnecessary for lucene's usage, as we always sync() before renaming.
+ unCache(source);
+ in.renameFile(source, dest);
+ }
+
+
@Override
public synchronized IndexInput openInput(String name, IOContext context) throws IOException {
if (VERBOSE) {
@@ -221,7 +229,7 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable
bytes = context.flushInfo.estimatedSegmentSize;
}
- return !name.equals(IndexFileNames.SEGMENTS_GEN) && (bytes <= maxMergeSizeBytes) && (bytes + cache.ramBytesUsed()) <= maxCachedBytes;
+ return (bytes <= maxMergeSizeBytes) && (bytes + cache.ramBytesUsed()) <= maxCachedBytes;
}
private final Object uncacheLock = new Object();
diff --git a/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java b/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java
index 240efd450ff..7f5e4d7c3c3 100644
--- a/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java
@@ -112,6 +112,8 @@ public class RAMDirectory extends BaseDirectory implements Accountable {
@Override
public final String[] listAll() {
ensureOpen();
+ // NOTE: this returns a "weakly consistent view". Unless we change Dir API, keep this,
+ // and do not synchronize or anything stronger. it's great for testing!
// NOTE: fileMap.keySet().toArray(new String[0]) is broken in non Sun JDKs,
// and the code below is resilient to map changes during the array population.
Set fileNames = fileMap.keySet();
@@ -190,6 +192,17 @@ public class RAMDirectory extends BaseDirectory implements Accountable {
public void sync(Collection names) throws IOException {
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ ensureOpen();
+ RAMFile file = fileMap.get(source);
+ if (file == null) {
+ throw new FileNotFoundException(source);
+ }
+ fileMap.put(dest, file);
+ fileMap.remove(source);
+ }
+
/** Returns a stream reading an existing file. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
@@ -207,5 +220,4 @@ public class RAMDirectory extends BaseDirectory implements Accountable {
isOpen = false;
fileMap.clear();
}
-
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 5eba346433b..eeccab6699c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -1115,8 +1115,8 @@ public class TestAddIndexes extends LuceneTestCase {
w3.addIndexes(readers);
w3.close();
// we should now see segments_X,
- // segments.gen,_Y.cfs,_Y.cfe, _Z.si
- assertEquals("Only one compound segment should exist, but got: " + Arrays.toString(dir.listAll()), 5, dir.listAll().length);
+ // _Y.cfs,_Y.cfe, _Z.si
+ assertEquals("Only one compound segment should exist, but got: " + Arrays.toString(dir.listAll()), 4, dir.listAll().length);
dir.close();
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
index 10d35ab1902..43666947e3b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
@@ -78,9 +78,6 @@ public class TestAllFilesHaveCodecHeader extends LuceneTestCase {
if (file.equals(IndexWriter.WRITE_LOCK_NAME)) {
continue; // write.lock has no header, thats ok
}
- if (file.equals(IndexFileNames.SEGMENTS_GEN)) {
- continue; // segments.gen has no header, thats ok
- }
if (file.endsWith(IndexFileNames.COMPOUND_FILE_EXTENSION)) {
CompoundFileDirectory cfsDir = new CompoundFileDirectory(dir, file, newIOContext(random()), false);
checkHeaders(cfsDir); // recurse into cfs
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
index 579d3784c64..e8ae00a4be2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
@@ -70,7 +70,7 @@ public class TestCrashCausesCorruptIndex extends LuceneTestCase {
// writes segments_1:
indexWriter.commit();
- crashAfterCreateOutput.setCrashAfterCreateOutput("segments_2");
+ crashAfterCreateOutput.setCrashAfterCreateOutput("pending_segments_2");
indexWriter.addDocument(getDocument());
try {
// tries to write segments_2 but hits fake exc:
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
index 6d6c50bce3c..6bc4a94d549 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
@@ -272,8 +272,6 @@ public class TestDeletionPolicy extends LuceneTestCase {
String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
gen);
- dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
-
boolean oneSecondResolution = true;
while(gen > 0) {
@@ -377,7 +375,6 @@ public class TestDeletionPolicy extends LuceneTestCase {
// Simplistic check: just verify all segments_N's still
// exist, and, I can open a reader on each:
- dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
long gen = SegmentInfos.getLastCommitGeneration(dir);
while(gen > 0) {
IndexReader reader = DirectoryReader.open(dir);
@@ -599,7 +596,6 @@ public class TestDeletionPolicy extends LuceneTestCase {
// Simplistic check: just verify only the past N segments_N's still
// exist, and, I can open a reader on each:
- dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
long gen = SegmentInfos.getLastCommitGeneration(dir);
for(int i=0;i= 3) {
+ if (failed) {
return;
}
//System.out.println("failOn: ");
@@ -670,7 +668,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
System.out.println("TEST: now fail; exc:");
new Throwable().printStackTrace(System.out);
}
- failCount++;
+ failed = true;
throw new FakeIOException();
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
index fc582c0a4cc..e9917d717e6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
@@ -145,6 +145,11 @@ public class TestFieldsReader extends LuceneTestCase {
fsDir.sync(names);
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ fsDir.renameFile(source, dest);
+ }
+
@Override
public void close() throws IOException {
fsDir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 6f8eaf5e735..575349c7903 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -1366,8 +1366,7 @@ public class TestIndexWriter extends LuceneTestCase {
if (iter == 1) {
// we run a full commit so there should be a segments file etc.
assertTrue(files.contains("segments_1"));
- assertTrue(files.contains("segments.gen"));
- assertEquals(files.toString(), files.size(), 5);
+ assertEquals(files.toString(), files.size(), 4);
} else {
// this is an NRT reopen - no segments files yet
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index a8fb84ed753..faf4e4497ac 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -1111,7 +1111,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
// LUCENE-1044: Simulate checksum error in segments_N
public void testSegmentsChecksumError() throws IOException {
- Directory dir = newDirectory();
+ BaseDirectoryWrapper dir = newDirectory();
+ dir.setCheckIndexOnClose(false); // we corrupt the index
IndexWriter writer = null;
@@ -1137,17 +1138,12 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
out.close();
in.close();
- IndexReader reader = null;
try {
- reader = DirectoryReader.open(dir);
- } catch (IOException e) {
- e.printStackTrace(System.out);
- fail("segmentInfos failed to retry fallback to correct segments_N file");
+ DirectoryReader.open(dir);
+ fail("didn't get expected checksum error");
+ } catch (CorruptIndexException expected) {
}
- reader.close();
-
- // should remove the corrumpted segments_N
- new IndexWriter(dir, newIndexWriterConfig(null)).close();
+
dir.close();
}
@@ -1260,73 +1256,6 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
dir.close();
}
- // Simulate a writer that crashed while writing segments
- // file: make sure we can still open the index (ie,
- // gracefully fallback to the previous segments file),
- // and that we can add to the index:
- public void testSimulatedCrashedWriter() throws IOException {
- Directory dir = newDirectory();
- if (dir instanceof MockDirectoryWrapper) {
- ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
- }
-
- IndexWriter writer = null;
-
- writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-
- // add 100 documents
- for (int i = 0; i < 100; i++) {
- addDoc(writer);
- }
-
- // close
- writer.close();
-
- long gen = SegmentInfos.getLastCommitGeneration(dir);
- assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
- // Make the next segments file, with last byte
- // missing, to simulate a writer that crashed while
- // writing segments file:
- String fileNameIn = SegmentInfos.getLastCommitSegmentsFileName(dir);
- String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
- "",
- 1+gen);
- IndexInput in = dir.openInput(fileNameIn, newIOContext(random()));
- IndexOutput out = dir.createOutput(fileNameOut, newIOContext(random()));
- long length = in.length();
- for(int i=0;i names) throws IOException {
dir.sync(names);
}
+
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ dir.renameFile(source, dest);
+ }
+
@Override
public long fileLength(String name) throws IOException {
return dir.fileLength(name);
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
index 252abc3144b..0206bd9bbcc 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
@@ -77,7 +77,7 @@ public class TestIndexSplitter extends LuceneTestCase {
// now test cmdline
File destDir2 = createTempDir(LuceneTestCase.getTestClass().getSimpleName());
IndexSplitter.main(new String[] {dir.getAbsolutePath(), destDir2.getAbsolutePath(), splitSegName});
- assertEquals(5, destDir2.listFiles().length);
+ assertEquals(4, destDir2.listFiles().length);
Directory fsDirDest2 = newFSDirectory(destDir2);
r = DirectoryReader.open(fsDirDest2);
assertEquals(50, r.maxDoc());
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java
index 9c925f6ec41..3ae695b8c64 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java
@@ -113,6 +113,8 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler {
List indexFiles = copiedFiles.get(IndexAndTaxonomyRevision.INDEX_SOURCE);
String taxoSegmentsFile = IndexReplicationHandler.getSegmentsFile(taxoFiles, true);
String indexSegmentsFile = IndexReplicationHandler.getSegmentsFile(indexFiles, false);
+ String taxoPendingFile = taxoSegmentsFile == null ? null : "pending_" + taxoSegmentsFile;
+ String indexPendingFile = "pending_" + indexSegmentsFile;
boolean success = false;
try {
@@ -126,24 +128,35 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler {
}
indexDir.sync(indexFiles);
- // now copy and fsync segmentsFile, taxonomy first because it is ok if a
+ // now copy, fsync, and rename segmentsFile, taxonomy first because it is ok if a
// reader sees a more advanced taxonomy than the index.
- if (taxoSegmentsFile != null) {
- taxoClientDir.copy(taxoDir, taxoSegmentsFile, taxoSegmentsFile, IOContext.READONCE);
- }
- indexClientDir.copy(indexDir, indexSegmentsFile, indexSegmentsFile, IOContext.READONCE);
if (taxoSegmentsFile != null) {
- taxoDir.sync(Collections.singletonList(taxoSegmentsFile));
+ taxoClientDir.copy(taxoDir, taxoSegmentsFile, taxoPendingFile, IOContext.READONCE);
}
- indexDir.sync(Collections.singletonList(indexSegmentsFile));
+ indexClientDir.copy(indexDir, indexSegmentsFile, indexPendingFile, IOContext.READONCE);
+
+ if (taxoSegmentsFile != null) {
+ taxoDir.sync(Collections.singletonList(taxoPendingFile));
+ }
+ indexDir.sync(Collections.singletonList(indexPendingFile));
+
+ if (taxoSegmentsFile != null) {
+ taxoDir.renameFile(taxoPendingFile, taxoSegmentsFile);
+ }
+
+ indexDir.renameFile(indexPendingFile, indexSegmentsFile);
success = true;
} finally {
if (!success) {
- taxoFiles.add(taxoSegmentsFile); // add it back so it gets deleted too
+ if (taxoSegmentsFile != null) {
+ taxoFiles.add(taxoSegmentsFile); // add it back so it gets deleted too
+ taxoFiles.add(taxoPendingFile);
+ }
IndexReplicationHandler.cleanupFilesOnFailure(taxoDir, taxoFiles);
indexFiles.add(indexSegmentsFile); // add it back so it gets deleted too
+ indexFiles.add(indexPendingFile);
IndexReplicationHandler.cleanupFilesOnFailure(indexDir, indexFiles);
}
}
@@ -156,10 +169,6 @@ public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler {
infoStream.message(INFO_STREAM_COMPONENT, "revisionReady(): currentVersion=" + currentVersion
+ " currentRevisionFiles=" + currentRevisionFiles);
}
-
- // update the segments.gen file
- IndexReplicationHandler.writeSegmentsGen(taxoSegmentsFile, taxoDir);
- IndexReplicationHandler.writeSegmentsGen(indexSegmentsFile, indexDir);
// Cleanup the index directory from old and unused index files.
// NOTE: we don't use IndexWriter.deleteUnusedFiles here since it may have
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
index bbbfbe41d71..192e2f75e1b 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
@@ -31,7 +31,6 @@ import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
@@ -110,7 +109,7 @@ public class IndexReplicationHandler implements ReplicationHandler {
}
String segmentsFile = files.remove(files.size() - 1);
- if (!segmentsFile.startsWith(IndexFileNames.SEGMENTS) || segmentsFile.equals(IndexFileNames.SEGMENTS_GEN)) {
+ if (!segmentsFile.startsWith(IndexFileNames.SEGMENTS) || segmentsFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
throw new IllegalStateException("last file to copy+sync must be segments_N but got " + segmentsFile
+ "; check your Revision implementation!");
}
@@ -148,7 +147,6 @@ public class IndexReplicationHandler implements ReplicationHandler {
if (commit != null && commit.getSegmentsFileName().equals(segmentsFile)) {
Set commitFiles = new HashSet<>();
commitFiles.addAll(commit.getFileNames());
- commitFiles.add(IndexFileNames.SEGMENTS_GEN);
Matcher matcher = IndexFileNames.CODEC_FILE_PATTERN.matcher("");
for (String file : dir.listAll()) {
if (!commitFiles.contains(file)
@@ -180,19 +178,6 @@ public class IndexReplicationHandler implements ReplicationHandler {
}
}
- /**
- * Writes {@link IndexFileNames#SEGMENTS_GEN} file to the directory, reading
- * the generation from the given {@code segmentsFile}. If it is {@code null},
- * this method deletes segments.gen from the directory.
- */
- public static void writeSegmentsGen(String segmentsFile, Directory dir) {
- if (segmentsFile != null) {
- SegmentInfos.writeSegmentsGen(dir, SegmentInfos.generationFromSegmentsFileName(segmentsFile));
- } else {
- IOUtils.deleteFilesIgnoringExceptions(dir, IndexFileNames.SEGMENTS_GEN);
- }
- }
-
/**
* Constructor with the given index directory and callback to notify when the
* indexes were updated.
@@ -236,6 +221,7 @@ public class IndexReplicationHandler implements ReplicationHandler {
Directory clientDir = sourceDirectory.values().iterator().next();
List files = copiedFiles.values().iterator().next();
String segmentsFile = getSegmentsFile(files, false);
+ String pendingSegmentsFile = "pending_" + segmentsFile;
boolean success = false;
try {
@@ -245,14 +231,16 @@ public class IndexReplicationHandler implements ReplicationHandler {
// fsync all copied files (except segmentsFile)
indexDir.sync(files);
- // now copy and fsync segmentsFile
- clientDir.copy(indexDir, segmentsFile, segmentsFile, IOContext.READONCE);
- indexDir.sync(Collections.singletonList(segmentsFile));
+ // now copy and fsync segmentsFile as pending, then rename (simulating lucene commit)
+ clientDir.copy(indexDir, segmentsFile, pendingSegmentsFile, IOContext.READONCE);
+ indexDir.sync(Collections.singletonList(pendingSegmentsFile));
+ indexDir.renameFile(pendingSegmentsFile, segmentsFile);
success = true;
} finally {
if (!success) {
files.add(segmentsFile); // add it back so it gets deleted too
+ files.add(pendingSegmentsFile);
cleanupFilesOnFailure(indexDir, files);
}
}
@@ -265,9 +253,6 @@ public class IndexReplicationHandler implements ReplicationHandler {
infoStream.message(INFO_STREAM_COMPONENT, "revisionReady(): currentVersion=" + currentVersion
+ " currentRevisionFiles=" + currentRevisionFiles);
}
-
- // update the segments.gen file
- writeSegmentsGen(segmentsFile, indexDir);
// Cleanup the index directory from old and unused index files.
// NOTE: we don't use IndexWriter.deleteUnusedFiles here since it may have
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
index 378a3928abf..2c832daef11 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
@@ -126,7 +126,7 @@ public class IndexAndTaxonomyRevisionTest extends ReplicatorTestCase {
assertEquals(2, sourceFiles.size());
for (List files : sourceFiles.values()) {
String lastFile = files.get(files.size() - 1).fileName;
- assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS) && !lastFile.equals(IndexFileNames.SEGMENTS_GEN));
+ assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
}
indexWriter.close();
} finally {
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
index c0c4d04f65d..6ebdd15ab45 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
@@ -114,7 +114,7 @@ public class IndexRevisionTest extends ReplicatorTestCase {
assertEquals(1, sourceFiles.size());
List files = sourceFiles.values().iterator().next();
String lastFile = files.get(files.size() - 1).fileName;
- assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS) && !lastFile.equals(IndexFileNames.SEGMENTS_GEN));
+ assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
writer.close();
} finally {
IOUtils.close(dir);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
index d52734d9e22..8031b50b5b9 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseDirectoryTestCase.java
@@ -96,6 +96,29 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IOUtils.close(source, dest);
}
+ public void testRename() throws Exception {
+ Directory dir = getDirectory(createTempDir("testRename"));
+
+ IndexOutput output = dir.createOutput("foobar", newIOContext(random()));
+ int numBytes = random().nextInt(20000);
+ byte bytes[] = new byte[numBytes];
+ random().nextBytes(bytes);
+ output.writeBytes(bytes, bytes.length);
+ output.close();
+
+ dir.renameFile("foobar", "foobaz");
+
+ IndexInput input = dir.openInput("foobaz", newIOContext(random()));
+ byte bytes2[] = new byte[numBytes];
+ input.readBytes(bytes2, 0, bytes2.length);
+ assertEquals(input.length(), numBytes);
+ input.close();
+
+ assertArrayEquals(bytes, bytes2);
+
+ dir.close();
+ }
+
// TODO: are these semantics really needed by lucene? can we just throw exception?
public void testCopyOverwrite() throws Exception {
Directory source = getDirectory(createTempDir("testCopyOverwrite"));
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
index a4bd94dd0f4..759275d48b3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -238,6 +238,39 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
}
}
+ @Override
+ public synchronized void renameFile(String source, String dest) throws IOException {
+ maybeYield();
+ maybeThrowDeterministicException();
+
+ if (crashed) {
+ throw new IOException("cannot rename after crash");
+ }
+
+ if (openFiles.containsKey(source)) {
+ if (assertNoDeleteOpenFile) {
+ throw (AssertionError) fillOpenTrace(new AssertionError("MockDirectoryWrapper: file \"" + source + "\" is still open: cannot rename"), source, true);
+ } else if (noDeleteOpenFile) {
+ throw (IOException) fillOpenTrace(new IOException("MockDirectoryWrapper: file \"" + source + "\" is still open: cannot rename"), source, true);
+ }
+ }
+
+ boolean success = false;
+ try {
+ in.renameFile(source, dest);
+ success = true;
+ } finally {
+ if (success) {
+ // we don't do this stuff with lucene's commit, but it's just for completeness
+ if (unSyncedFiles.contains(source)) {
+ unSyncedFiles.remove(source);
+ unSyncedFiles.add(dest);
+ }
+ openFilesDeleted.remove(source);
+ }
+ }
+ }
+
public synchronized final long sizeInBytes() throws IOException {
if (in instanceof RAMDirectory)
return ((RAMDirectory) in).ramBytesUsed();
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
index 97f2bbc142c..5c7e5142c35 100644
--- a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
@@ -23,6 +23,7 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Set;
+import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
@@ -236,8 +237,6 @@ public class BlockDirectory extends Directory {
for (String file : files) {
cache.delete(getFileCacheName(file));
}
- // segments.gen won't be removed above
- cache.delete(dirName + "/" + "segments.gen");
} catch (FileNotFoundException e) {
// the local file system folder may be gone
@@ -342,7 +341,9 @@ public class BlockDirectory extends Directory {
* file/context.
*/
boolean useWriteCache(String name, IOContext context) {
- if (!blockCacheWriteEnabled) {
+ if (!blockCacheWriteEnabled || name.startsWith(IndexFileNames.PENDING_SEGMENTS)) {
+ // for safety, don't bother caching pending commits.
+ // the cache does support renaming (renameCacheFile), but that's a scary optimization.
return false;
}
if (blockCacheFileTypes != null && !isCachableFile(name)) {
@@ -375,6 +376,11 @@ public class BlockDirectory extends Directory {
directory.deleteFile(name);
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ directory.renameFile(source, dest);
+ }
+
public long fileLength(String name) throws IOException {
return directory.fileLength(name);
}
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
index 8e0ca20fd75..dddd406f3bb 100644
--- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -44,11 +45,11 @@ public class HdfsDirectory extends BaseDirectory {
public static final int BUFFER_SIZE = 8192;
private static final String LF_EXT = ".lf";
- protected static final String SEGMENTS_GEN = "segments.gen";
protected Path hdfsDirPath;
protected Configuration configuration;
private final FileSystem fileSystem;
+ private final FileContext fileContext;
public HdfsDirectory(Path hdfsDirPath, Configuration configuration)
throws IOException {
@@ -56,6 +57,7 @@ public class HdfsDirectory extends BaseDirectory {
this.hdfsDirPath = hdfsDirPath;
this.configuration = configuration;
fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), configuration);
+ fileContext = FileContext.getFileContext(hdfsDirPath.toUri(), configuration);
while (true) {
try {
@@ -98,9 +100,6 @@ public class HdfsDirectory extends BaseDirectory {
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
- if (SEGMENTS_GEN.equals(name)) {
- return new NullIndexOutput();
- }
return new HdfsFileWriter(getFileSystem(), new Path(hdfsDirPath, name));
}
@@ -138,6 +137,13 @@ public class HdfsDirectory extends BaseDirectory {
getFileSystem().delete(path, false);
}
+ @Override
+ public void renameFile(String source, String dest) throws IOException {
+ Path sourcePath = new Path(hdfsDirPath, source);
+ Path destPath = new Path(hdfsDirPath, dest);
+ fileContext.rename(sourcePath, destPath);
+ }
+
@Override
public long fileLength(String name) throws IOException {
return HdfsFileReader.getLength(getFileSystem(),
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/NullIndexOutput.java b/solr/core/src/java/org/apache/solr/store/hdfs/NullIndexOutput.java
deleted file mode 100644
index 9e658c91d17..00000000000
--- a/solr/core/src/java/org/apache/solr/store/hdfs/NullIndexOutput.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package org.apache.solr.store.hdfs;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.store.IndexOutput;
-
-/**
- * @lucene.experimental
- */
-public class NullIndexOutput extends IndexOutput {
-
- private long pos;
- private long length;
-
- @Override
- public void close() throws IOException {
-
- }
-
- @Override
- public long getFilePointer() {
- return pos;
- }
-
- @Override
- public void writeByte(byte b) throws IOException {
- pos++;
- updateLength();
- }
-
- @Override
- public void writeBytes(byte[] b, int offset, int length) throws IOException {
- pos += length;
- updateLength();
- }
-
- private void updateLength() {
- if (pos > length) {
- length = pos;
- }
- }
-
- @Override
- public long getChecksum() throws IOException {
- return 0; // we don't write anything.
- }
-}
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminCreateDiscoverTest.java b/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminCreateDiscoverTest.java
index 33347f9a01d..ccd43c0071e 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminCreateDiscoverTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminCreateDiscoverTest.java
@@ -141,9 +141,6 @@ public class CoreAdminCreateDiscoverTest extends SolrTestCaseJ4 {
// Should have segments in the directory pointed to by the ${DATA_TEST}.
File test = new File(dataDir, "index");
assertTrue("Should have found index dir at " + test.getAbsolutePath(), test.exists());
- File gen = new File(test, "segments.gen");
- assertTrue("Should be segments.gen in the dir at " + gen.getAbsolutePath(), gen.exists());
-
}
@Test
@@ -276,9 +273,6 @@ public class CoreAdminCreateDiscoverTest extends SolrTestCaseJ4 {
// Should have segments in the directory pointed to by the ${DATA_TEST}.
File test = new File(data, "index");
assertTrue("Should have found index dir at " + test.getAbsolutePath(), test.exists());
- File gen = new File(test, "segments.gen");
- assertTrue("Should be segments.gen in the dir at " + gen.getAbsolutePath(), gen.exists());
-
}
}
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminHandlerTest.java
index 5a614d069ad..ebb1b5b6e8b 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/CoreAdminHandlerTest.java
@@ -120,8 +120,6 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
// Should have segments in the directory pointed to by the ${DATA_TEST}.
File test = new File(dataDir, "index");
assertTrue("Should have found index dir at " + test.getAbsolutePath(), test.exists());
- test = new File(test,"segments.gen");
- assertTrue("Should have found segments.gen at " + test.getAbsolutePath(), test.exists());
}
@Test
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
index 7289d736863..533c74c09cd 100644
--- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
@@ -119,6 +119,26 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
assertFalse(slowFileExists(directory, "testing.test"));
}
+ public void testRename() throws IOException {
+ String[] listAll = directory.listAll();
+ for (String file : listAll) {
+ directory.deleteFile(file);
+ }
+
+ IndexOutput output = directory.createOutput("testing.test", new IOContext());
+ output.writeInt(12345);
+ output.close();
+ directory.renameFile("testing.test", "testing.test.renamed");
+ assertFalse(slowFileExists(directory, "testing.test"));
+ assertTrue(slowFileExists(directory, "testing.test.renamed"));
+ IndexInput input = directory.openInput("testing.test.renamed", new IOContext());
+ assertEquals(12345, input.readInt());
+ assertEquals(input.getFilePointer(), input.length());
+ input.close();
+ directory.deleteFile("testing.test.renamed");
+ assertFalse(slowFileExists(directory, "testing.test.renamed"));
+ }
+
@Test
public void testEOF() throws IOException {
Directory fsDir = new RAMDirectory();