Throw LockObtainFailedException when we can't lock the index directory
Today we throw ElasticsearchException if we can't lock the index. This causes problems because some of the places that handle IOException on shard deletion won't schedule a retry when the index directory can't be locked for removal. This happens with shadow replicas, for instance, when a shared filesystem is used; the result is that the delete of an index is never acked.
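The key point is that LockObtainFailedException extends IOException, so deletion code that already retries on IOException will now also retry when the directory lock can't be acquired. Below is a minimal sketch of that effect (hypothetical helper names, not the code touched by this commit):

import java.io.IOException;
import org.apache.lucene.store.LockObtainFailedException;

// Hypothetical sketch: tryDeleteShardDirectory() and scheduleRetry() stand in for
// the real shard-deletion and retry logic.
class ShardDeleteRetrySketch {

    void deleteWithRetry() {
        try {
            tryDeleteShardDirectory();
        } catch (IOException e) {
            // Before this change, a failed directory lock surfaced as ElasticsearchException
            // (a RuntimeException), bypassed this branch, and no retry was ever scheduled,
            // so the index delete was never acked.
            scheduleRetry();
        }
    }

    void tryDeleteShardDirectory() throws IOException {
        // With this change, a lock failure is a LockObtainFailedException, which is an IOException.
        throw new LockObtainFailedException("unable to acquire write.lock for <path>");
    }

    void scheduleRetry() {
        // Hypothetical: re-queue the pending delete so it eventually completes and is acked.
    }
}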
commit 8e07b4fba4
parent 64f981fc32
NodeEnvironment.java

@@ -311,7 +311,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
      * shard paths. The "write.lock" file is assumed to be under the shard
      * path's "index" directory as used by Elasticsearch.
      *
-     * @throws ElasticsearchException if any of the locks could not be acquired
+     * @throws LockObtainFailedException if any of the locks could not be acquired
      */
     public static void acquireFSLockForPaths(@IndexSettings Settings indexSettings, Path... shardPaths) throws IOException {
         Lock[] locks = new Lock[shardPaths.length];
@@ -326,7 +326,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
             try {
                 locks[i] = Lucene.acquireWriteLock(dirs[i]);
             } catch (IOException ex) {
-                throw new ElasticsearchException("unable to acquire " +
+                throw new LockObtainFailedException("unable to acquire " +
                         IndexWriter.WRITE_LOCK_NAME + " for " + p);
             }
         }
IndexShardTests.java

@@ -19,6 +19,7 @@
 package org.elasticsearch.index.shard;

 import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.store.LockObtainFailedException;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.stats.IndexStats;
@@ -110,7 +111,6 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest {
     public void testLockTryingToDelete() throws Exception {
         createIndex("test");
         ensureGreen();
-        //IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
         Path[] shardPaths = env.availableShardPaths(new ShardId("test", 0));
         logger.info("--> paths: [{}]", shardPaths);
@@ -118,7 +118,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest {
         try {
             NodeEnvironment.acquireFSLockForPaths(Settings.EMPTY, shardPaths);
             fail("should not have been able to acquire the lock");
-        } catch (ElasticsearchException e) {
+        } catch (LockObtainFailedException e) {
             assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
         }
         // Test without the regular shard lock to assume we can acquire it
@@ -128,7 +128,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest {
         try {
             env.deleteShardDirectoryUnderLock(sLock, Settings.builder().build());
             fail("should not have been able to delete the directory");
-        } catch (ElasticsearchException e) {
+        } catch (LockObtainFailedException e) {
             assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
         }
     }