Improve deletion of corrupted snapshots

Makes it possible to delete snapshots that are missing some of their metadata files. This can happen if snapshot creation failed because the repository drive ran out of disk space.

Closes #6383
This commit is contained in:
Igor Motov 2014-06-02 13:10:54 -04:00
parent 1425e28639
commit 2149a9403d
2 changed files with 91 additions and 60 deletions

View File

@ -259,7 +259,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
@Override
public void deleteSnapshot(SnapshotId snapshotId) {
Snapshot snapshot = readSnapshot(snapshotId);
MetaData metaData = readSnapshotMetaData(snapshotId, snapshot.indices());
MetaData metaData = readSnapshotMetaData(snapshotId, snapshot.indices(), true);
try {
String blobName = snapshotBlobName(snapshotId);
// Delete snapshot file first so we wouldn't end up with partially deleted snapshot that looks OK
@ -284,11 +284,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
try {
indexMetaDataBlobContainer.deleteBlob(blobName);
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to delete metadata", ex);
logger.warn("[{}] failed to delete metadata for index [{}]", ex, snapshotId, index);
}
IndexMetaData indexMetaData = metaData.index(index);
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
indexShardRepository.delete(snapshotId, new ShardId(index, i));
if (indexMetaData != null) {
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
indexShardRepository.delete(snapshotId, new ShardId(index, i));
}
}
}
} catch (IOException ex) {
@ -367,41 +369,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
*/
@Override
public MetaData readSnapshotMetaData(SnapshotId snapshotId, ImmutableList<String> indices) {
MetaData metaData;
try {
byte[] data = snapshotsBlobContainer.readBlobFully(metaDataBlobName(snapshotId));
metaData = readMetaData(data);
} catch (FileNotFoundException | NoSuchFileException ex) {
throw new SnapshotMissingException(snapshotId, ex);
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
}
MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
for (String index : indices) {
BlobPath indexPath = basePath().add("indices").add(index);
ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
XContentParser parser = null;
try {
byte[] data = indexMetaDataBlobContainer.readBlobFully(snapshotBlobName(snapshotId));
parser = XContentHelper.createParser(data, 0, data.length);
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
IndexMetaData indexMetaData = IndexMetaData.Builder.fromXContent(parser);
if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
metaDataBuilder.put(indexMetaData, false);
continue;
}
}
throw new ElasticsearchParseException("unexpected token [" + token + "]");
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to read metadata", ex);
} finally {
if (parser != null) {
parser.close();
}
}
}
return metaDataBuilder.build();
return readSnapshotMetaData(snapshotId, indices, false);
}
/**
@ -439,6 +407,48 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
}
/**
 * Reads the snapshot-level {@link MetaData} blob and then the per-index metadata blob for each
 * of the given indices, assembling them into a single {@link MetaData} instance.
 *
 * @param snapshotId        snapshot whose metadata should be read
 * @param indices           indices whose index-level metadata blobs should be loaded
 * @param ignoreIndexErrors if {@code true}, unreadable or unparsable index metadata blobs are
 *                          logged and skipped (used during deletion of corrupted snapshots);
 *                          if {@code false}, such failures are rethrown
 * @return the combined metadata; indices whose blobs were skipped are absent from the result
 * @throws SnapshotMissingException if the top-level metadata blob does not exist
 * @throws SnapshotException        on other read failures (always for the top-level blob;
 *                                  for index blobs only when {@code ignoreIndexErrors} is false)
 */
private MetaData readSnapshotMetaData(SnapshotId snapshotId, ImmutableList<String> indices, boolean ignoreIndexErrors) {
    MetaData metaData;
    try {
        byte[] data = snapshotsBlobContainer.readBlobFully(metaDataBlobName(snapshotId));
        metaData = readMetaData(data);
    } catch (FileNotFoundException | NoSuchFileException ex) {
        // A missing top-level metadata blob means the snapshot itself is gone - this is never
        // ignorable, even when deleting a corrupted snapshot.
        throw new SnapshotMissingException(snapshotId, ex);
    } catch (IOException ex) {
        throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
    }
    MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
    for (String index : indices) {
        BlobPath indexPath = basePath().add("indices").add(index);
        ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
        try {
            byte[] data = indexMetaDataBlobContainer.readBlobFully(snapshotBlobName(snapshotId));
            try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
                XContentParser.Token token;
                if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
                    IndexMetaData indexMetaData = IndexMetaData.Builder.fromXContent(parser);
                    if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
                        metaDataBuilder.put(indexMetaData, false);
                        continue;
                    }
                }
                // Reaching here means the blob did not contain a single well-formed object.
                if (!ignoreIndexErrors) {
                    throw new ElasticsearchParseException("unexpected token [" + token + "]");
                } else {
                    logger.warn("[{}] [{}] unexpected token while reading snapshot metadata [{}]", snapshotId, index, token);
                }
            }
        } catch (IOException ex) {
            if (!ignoreIndexErrors) {
                throw new SnapshotException(snapshotId, "failed to read metadata", ex);
            } else {
                // Pass the exception directly after the message so the warn(String, Throwable, Object...)
                // overload is selected and the stack trace is logged, matching the convention used by the
                // other warn(...) call in deleteSnapshot(); passing it last would silently drop it.
                logger.warn("[{}] [{}] failed to read metadata for index [{}]", ex, snapshotId, index, index);
            }
        }
    }
    return metaDataBuilder.build();
}
/**
* Configures RateLimiter based on repository and global settings
*
@ -465,9 +475,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
* @throws IOException parse exceptions
*/
private BlobStoreSnapshot readSnapshot(byte[] data) throws IOException {
XContentParser parser = null;
try {
parser = XContentHelper.createParser(data, 0, data.length);
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {
@ -479,10 +487,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
}
throw new ElasticsearchParseException("unexpected token [" + token + "]");
} finally {
if (parser != null) {
parser.close();
}
}
}
@ -494,9 +498,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
* @throws IOException parse exceptions
*/
private MetaData readMetaData(byte[] data) throws IOException {
XContentParser parser = null;
try {
parser = XContentHelper.createParser(data, 0, data.length);
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {
@ -508,10 +510,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
}
throw new ElasticsearchParseException("unexpected token [" + token + "]");
} finally {
if (parser != null) {
parser.close();
}
}
}
@ -615,9 +613,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
protected ImmutableList<SnapshotId> readSnapshotList() throws IOException {
byte[] data = snapshotsBlobContainer.readBlobFully(SNAPSHOTS_FILE);
ArrayList<SnapshotId> snapshots = new ArrayList<>();
XContentParser parser = null;
try {
parser = XContentHelper.createParser(data, 0, data.length);
try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
@ -630,10 +626,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
}
}
}
} finally {
if (parser != null) {
parser.close();
}
}
return ImmutableList.copyOf(snapshots);
}

View File

@ -629,6 +629,45 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
}
@Test
// Regression test for #6383: deleting a snapshot must succeed even when some of its
// index-level and shard-level metadata blobs are missing from the repository
// (e.g. because the repository drive ran out of disk space during snapshot creation).
public void deleteSnapshotWithMissingIndexAndShardMetadataTest() throws Exception {
Client client = client();
// Use a suite-scoped temp dir as an fs repository so we can corrupt it directly on disk.
File repo = newTempDir(LifecycleScope.SUITE);
logger.info("--> creating repository at " + repo.getAbsolutePath());
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(ImmutableSettings.settingsBuilder()
.put("location", repo)
// compression off so blob files on disk keep predictable names/contents
.put("compress", false)
.put("chunk_size", randomIntBetween(100, 1000))));
createIndex("test-idx-1", "test-idx-2");
ensureYellow();
logger.info("--> indexing some data");
indexRandom(true,
client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
logger.info("--> creating snapshot");
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
// Snapshot must be fully successful before we corrupt it, so the later failure
// can only come from the deliberately deleted files.
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
logger.info("--> delete index metadata and shard metadata");
// Simulate corruption two different ways: drop the index-level metadata blob for
// test-idx-1 and the shard-level metadata blob for shard 0 of test-idx-2.
File indices = new File(repo, "indices");
File testIndex1 = new File(indices, "test-idx-1");
File testIndex2 = new File(indices, "test-idx-2");
File testIndex2Shard0 = new File(testIndex2, "0");
new File(testIndex1, "snapshot-test-snap-1").delete();
new File(testIndex2Shard0, "snapshot-test-snap-1").delete();
logger.info("--> delete snapshot");
// Must not throw despite the missing metadata blobs.
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
logger.info("--> make sure snapshot doesn't exist");
assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
}
@Test
@TestLogging("snapshots:TRACE")
public void snapshotClosedIndexTest() throws Exception {