Dangling indices are not imported if a tombstone for the same index
(same name and UUID) exists in the cluster state. This fixes a
situation in which an index data folder copied into a node's data
directory while the node was running would still be imported, even
though that index had a tombstone in the cluster state.

Closes #18250
Closes #18249
Ali Beyad 2016-05-11 12:54:41 -04:00
parent ce4af4be42
commit 5189eb41c7
6 changed files with 71 additions and 16 deletions
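
To make the new behavior concrete, here is a minimal, self-contained sketch of the import decision this change introduces. The SimpleIndex and shouldImport names are illustrative stand-ins, not the actual Elasticsearch classes; the real implementation in the diff below performs the same checks against the cluster metadata and its IndexGraveyard.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

public class DanglingImportSketch {

    // Simplified stand-in for an index identified by name and UUID.
    static final class SimpleIndex {
        final String name;
        final String uuid;

        SimpleIndex(String name, String uuid) {
            this.name = name;
            this.uuid = uuid;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            SimpleIndex other = (SimpleIndex) o;
            return name.equals(other.name) && uuid.equals(other.uuid);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, uuid);
        }
    }

    // Decide whether an index found on disk should be imported as a dangling index.
    static boolean shouldImport(SimpleIndex onDisk, Set<String> clusterIndexNames, Set<SimpleIndex> tombstones) {
        if (clusterIndexNames.contains(onDisk.name)) {
            return false; // an index with the same name is already part of the cluster state
        }
        if (tombstones.contains(onDisk)) {
            return false; // same name and UUID were deleted; the data was likely copied back afterwards
        }
        return true; // genuinely dangling, import it
    }

    public static void main(String[] args) {
        SimpleIndex copiedBack = new SimpleIndex("logs", "uuid-1");
        Set<String> clusterIndexNames = new HashSet<>();
        Set<SimpleIndex> tombstones = new HashSet<>(Arrays.asList(new SimpleIndex("logs", "uuid-1")));
        // Prints false: before this change the tombstone was ignored and "logs" would be re-imported.
        System.out.println(shouldImport(copiedBack, clusterIndexNames, tombstones));
    }
}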


@@ -364,7 +364,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]ESFileStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]AsyncShardFetch.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]DanglingIndicesState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayAllocator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayService.java" checks="LineLength" />


@@ -123,6 +123,18 @@ final public class IndexGraveyard implements MetaData.Custom {
return tombstones;
}
/**
* Returns true if the graveyard contains a tombstone for the given index.
*/
public boolean containsIndex(final Index index) {
for (Tombstone tombstone : tombstones) {
if (tombstone.getIndex().equals(index)) {
return true;
}
}
return false;
}
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
builder.startArray(TOMBSTONES_FIELD.getPreferredName());
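
The containsIndex helper added above can be exercised directly. A short usage sketch follows, assuming the Elasticsearch core classes shown in this diff (IndexGraveyard, Index, UUIDs) are on the classpath; the class and index names here are made up for illustration.

import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.index.Index;

public class GraveyardContainsExample {
    public static void main(String[] args) {
        final Index deleted = new Index("logs-2016.05.11", UUIDs.randomBase64UUID());
        final IndexGraveyard graveyard = IndexGraveyard.builder().addTombstone(deleted).build();
        // true: the graveyard holds a tombstone with the same name and UUID
        System.out.println(graveyard.containsIndex(deleted));
        // false: same name but a different UUID is treated as a different index
        System.out.println(graveyard.containsIndex(new Index("logs-2016.05.11", UUIDs.randomBase64UUID())));
    }
}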


@@ -20,6 +20,7 @@
package org.elasticsearch.gateway;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.component.AbstractComponent;
@@ -68,7 +69,7 @@ public class DanglingIndicesState extends AbstractComponent {
* Process dangling indices based on the provided meta data, handling cleanup, finding
* new dangling indices, and allocating outstanding ones.
*/
public void processDanglingIndices(MetaData metaData) {
public void processDanglingIndices(final MetaData metaData) {
if (nodeEnv.hasNodeFile() == false) {
return;
}
@@ -107,7 +108,7 @@ public class DanglingIndicesState extends AbstractComponent {
* Finds (@{link #findNewAndAddDanglingIndices}) and adds the new dangling indices
* to the currently tracked dangling indices.
*/
void findNewAndAddDanglingIndices(MetaData metaData) {
void findNewAndAddDanglingIndices(final MetaData metaData) {
danglingIndices.putAll(findNewDanglingIndices(metaData));
}
@@ -116,7 +117,7 @@
* that have state on disk, but are not part of the provided meta data, or not detected
* as dangled already.
*/
Map<Index, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
Map<Index, IndexMetaData> findNewDanglingIndices(final MetaData metaData) {
final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
@@ -125,13 +126,18 @@ public class DanglingIndicesState extends AbstractComponent {
try {
final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
final IndexGraveyard graveyard = metaData.indexGraveyard();
for (IndexMetaData indexMetaData : indexMetaDataList) {
if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata",
indexMetaData.getIndex());
} else if (graveyard.containsIndex(indexMetaData.getIndex())) {
logger.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " +
"index tombstones. This situation is likely caused by copying over the data directory for an index " +
"that was previously deleted.", indexMetaData.getIndex());
} else {
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state",
indexMetaData.getIndex());
logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
"auto import to cluster state", indexMetaData.getIndex());
newIndices.put(indexMetaData.getIndex(), indexMetaData);
}
}
@@ -151,7 +157,8 @@ public class DanglingIndicesState extends AbstractComponent {
return;
}
try {
allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())), new LocalAllocateDangledIndices.Listener() {
allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())),
new LocalAllocateDangledIndices.Listener() {
@Override
public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
logger.trace("allocated dangled");
@@ -161,7 +168,8 @@
public void onFailure(Throwable e) {
logger.info("failed to send allocated dangled", e);
}
});
}
);
} catch (Throwable e) {
logger.warn("failed to send allocate dangled", e);
}


@@ -527,7 +527,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
try {
if (clusterState.metaData().hasIndex(indexName)) {
final IndexMetaData index = clusterState.metaData().index(indexName);
throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of " +
"the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
}
deleteIndexStore(reason, metaData, clusterState);
} catch (IOException e) {


@@ -130,6 +130,23 @@ public class IndexGraveyardTests extends ESTestCase {
assertThat(diff.getRemovedCount(), equalTo(removals.size()));
}
public void testContains() {
List<Index> indices = new ArrayList<>();
final int numIndices = randomIntBetween(1, 5);
for (int i = 0; i < numIndices; i++) {
indices.add(new Index("idx-" + i, UUIDs.randomBase64UUID()));
}
final IndexGraveyard.Builder graveyard = IndexGraveyard.builder();
for (final Index index : indices) {
graveyard.addTombstone(index);
}
final IndexGraveyard indexGraveyard = graveyard.build();
for (final Index index : indices) {
assertTrue(indexGraveyard.containsIndex(index));
}
assertFalse(indexGraveyard.containsIndex(new Index(randomAsciiOfLength(6), UUIDs.randomBase64UUID())));
}
public static IndexGraveyard createRandom() {
final IndexGraveyard.Builder graveyard = IndexGraveyard.builder();
final int numTombstones = randomIntBetween(0, 4);


@@ -19,6 +19,7 @@
package org.elasticsearch.gateway;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
@@ -37,6 +38,7 @@ import static org.hamcrest.Matchers.equalTo;
/**
*/
public class DanglingIndicesStateTests extends ESTestCase {
private static Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
@@ -139,4 +141,20 @@ public class DanglingIndicesStateTests extends ESTestCase {
assertTrue(danglingState.getDanglingIndices().isEmpty());
}
}
public void testDanglingIndicesNotImportedWhenTombstonePresent() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);
final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build();
metaStateService.writeIndex("test_write", dangledIndex);
final IndexGraveyard graveyard = IndexGraveyard.builder().addTombstone(dangledIndex.getIndex()).build();
final MetaData metaData = MetaData.builder().indexGraveyard(graveyard).build();
assertThat(danglingState.findNewDanglingIndices(metaData).size(), equalTo(0));
}
}
}