Fall back to reading SegmentInfos from Store if reading from commit fails

In the event that reading from the latest commit fails, we should fall
back to reading from the `Store` using the traditional
`Directory.listAll()`

Related to #11361
This commit is contained in:
Lee Hinman 2015-05-28 10:00:36 -06:00
parent 55fc3a727b
commit 790baed755
3 changed files with 12 additions and 4 deletions

View File

@@ -319,11 +319,19 @@ public abstract class Engine implements Closeable {
/** /**
* Read the last segments info from the commit pointed to by the searcher manager * Read the last segments info from the commit pointed to by the searcher manager
*/ */
protected static SegmentInfos readLastCommittedSegmentInfos(SearcherManager sm) throws IOException { protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException {
IndexSearcher searcher = sm.acquire(); IndexSearcher searcher = sm.acquire();
try { try {
IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit(); IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit();
return Lucene.readSegmentInfos(latestCommit); return Lucene.readSegmentInfos(latestCommit);
} catch (IOException e) {
// Fall back to reading from the store if reading from the commit fails
try {
return store.readLastCommittedSegmentsInfo();
} catch (IOException e2) {
e2.addSuppressed(e);
throw e2;
}
} finally { } finally {
sm.release(searcher); sm.release(searcher);
} }

View File

@@ -275,7 +275,7 @@ public class InternalEngine extends Engine {
try { try {
final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId); final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId);
searcherManager = new SearcherManager(directoryReader, searcherFactory); searcherManager = new SearcherManager(directoryReader, searcherFactory);
lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager); lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
success = true; success = true;
return searcherManager; return searcherManager;
} catch (IOException e) { } catch (IOException e) {

View File

@@ -79,7 +79,7 @@ public class ShadowEngine extends Engine {
if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) { if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) {
reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId); reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId);
this.searcherManager = new SearcherManager(reader, searcherFactory); this.searcherManager = new SearcherManager(reader, searcherFactory);
this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager); this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
success = true; success = true;
} else { } else {
throw new IndexShardException(shardId, "failed to open a shadow engine after" + throw new IndexShardException(shardId, "failed to open a shadow engine after" +
@@ -148,7 +148,7 @@ public class ShadowEngine extends Engine {
store.incRef(); store.incRef();
try (ReleasableLock lock = readLock.acquire()) { try (ReleasableLock lock = readLock.acquire()) {
// reread the last committed segment infos // reread the last committed segment infos
lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager); lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
} catch (Throwable e) { } catch (Throwable e) {
if (isClosed.get() == false) { if (isClosed.get() == false) {
logger.warn("failed to read latest segment infos on flush", e); logger.warn("failed to read latest segment infos on flush", e);