Only flush for checkindex if we have uncommitted changes

Today we force a flush before check index to ensure we have an index
to check on. Yet if the index is large and the FS is slow this can have
significant impact on the index deletion performance. This commit introduces
a check for uncommitted changes, so the additional flush can be skipped when there is nothing to commit.

Closes #10505
This commit is contained in:
Simon Willnauer 2015-04-09 14:54:40 +02:00
parent 5367e04fbc
commit d470bdbff6
5 changed files with 20 additions and 21 deletions

View File

@ -1061,4 +1061,10 @@ public abstract class Engine implements Closeable {
}
}
}
/**
 * Returns <code>true</code> if the internal writer has any uncommitted changes.
 * Otherwise <code>false</code>.
 *
 * @return <code>true</code> if the engine has changes that have not yet been
 *         committed, <code>false</code> otherwise
 */
public abstract boolean hasUncommittedChanges();
}

View File

@ -940,6 +940,11 @@ public class InternalEngine extends Engine {
}
}
@Override
public boolean hasUncommittedChanges() {
    // Delegate to the underlying Lucene IndexWriter, which tracks whether any
    // changes have been made since the last commit.
    return indexWriter.hasUncommittedChanges();
}
@Override
protected SearcherManager getSearcherManager() {
return searcherManager;

View File

@ -216,4 +216,9 @@ public class ShadowEngine extends Engine {
}
}
}
@Override
public boolean hasUncommittedChanges() {
    // Always false: this engine reports no uncommitted changes of its own.
    // NOTE(review): presumably a shadow engine performs no writes itself —
    // confirm against the rest of ShadowEngine.
    return false;
}
}

View File

@ -158,7 +158,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
}
// Flushes, deletes, and verifies cleanup of the given index.
void unloadIndex(String indexName) throws Exception {
    // Forced, synchronous flush before deletion; the original author marked
    // this as temporary debugging aid — candidate for removal.
    client().admin().indices().prepareFlush(indexName).setWaitIfOngoing(true).setForce(true).get(); // temporary for debugging
    // Delete the index and require an acknowledged response ...
    ElasticsearchAssertions.assertAcked(client().admin().indices().prepareDelete(indexName).get());
    // ... then verify no index files were left open (leak check).
    ElasticsearchAssertions.assertAllFilesClosed();
}
@ -201,20 +200,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
Collections.shuffle(indexes, getRandom());
for (String index : indexes) {
if (index.equals("index-0.90.13.zip") == false) {
long startTime = System.currentTimeMillis();
logger.info("--> Testing old index " + index);
assertOldIndexWorks(index);
logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
}
}
}
@TestLogging("test.engine:TRACE,index.engine:TRACE,test.engine.lucene:TRACE,index.engine.lucene:TRACE")
public void testShitSlowIndex() throws Exception {
setupCluster();
for (int i = 0; i < 5; i++) {
String index = "index-0.90.13.zip";
long startTime = System.currentTimeMillis();
logger.info("--> Testing old index " + index);
assertOldIndexWorks(index);

View File

@ -26,17 +26,13 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.AbstractRandomizedTest;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.index.shard.IndexShardState;
@ -91,8 +87,10 @@ public class MockFSDirectoryService extends FsDirectoryService {
// When the internal engine closes we do a rollback, which removes uncommitted segments
// By doing a commit flush we perform a Lucene commit, but don't clear the translog,
// so that even in tests where don't flush we can check the integrity of the Lucene index
logger.info("{} flushing in order to run checkindex", indexShard.shardId());
Releasables.close(indexShard.engine().snapshotIndex()); // Keep translog for tests that rely on replaying it
if (indexShard.engine().hasUncommittedChanges()) { // only if we have any changes
logger.info("{} flushing in order to run checkindex", indexShard.shardId());
Releasables.close(indexShard.engine().snapshotIndex()); // Keep translog for tests that rely on replaying it
}
logger.info("{} flush finished in beforeIndexShardClosed", indexShard.shardId());
canRun = true;
}