Merge branch 'master' into feature/seq_no

Boaz Leskes · 2016-03-25 17:26:14 +01:00 · commit dcd2642dad
5 changed files with 80 additions and 77 deletions


@@ -191,39 +191,29 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         }
     }

-    private void applyCleanedIndices(final ClusterChangedEvent event) {
-        // handle closed indices, since they are not allocated on a node once they are closed
-        // so applyDeletedIndices might not take them into account
-        for (IndexService indexService : indicesService) {
-            Index index = indexService.index();
-            IndexMetaData indexMetaData = event.state().metaData().index(index);
-            if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
-                for (Integer shardId : indexService.shardIds()) {
-                    logger.debug("{}[{}] removing shard (index is closed)", index, shardId);
-                    try {
-                        indexService.removeShard(shardId, "removing shard (index is closed)");
-                    } catch (Throwable e) {
-                        logger.warn("{} failed to remove shard (index is closed)", e, index);
-                    }
-                }
-            }
-        }
-
-        Set<Index> hasAllocations = new HashSet<>();
-        for (ShardRouting routing : event.state().getRoutingNodes().node(event.state().nodes().localNodeId())) {
-            hasAllocations.add(routing.index());
-        }
-        for (IndexService indexService : indicesService) {
-            Index index = indexService.index();
-            if (hasAllocations.contains(index) == false) {
-                assert indexService.shardIds().isEmpty() :
-                    "no locally assigned shards, but index wasn't emptied by applyDeletedShards."
-                        + " index " + index + ", shards: " + indexService.shardIds();
-                if (logger.isDebugEnabled()) {
-                    logger.debug("{} cleaning index (no shards allocated)", index);
-                }
-                // clean the index
-                removeIndex(index, "removing index (no shards allocated)");
-            }
-        }
-    }
+    private void cleanFailedShards(final ClusterChangedEvent event) {
+        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+        if (routingNode == null) {
+            failedShards.clear();
+            return;
+        }
+        RoutingTable routingTable = event.state().routingTable();
+        for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
+            Map.Entry<ShardId, ShardRouting> entry = iterator.next();
+            ShardId failedShardId = entry.getKey();
+            ShardRouting failedShardRouting = entry.getValue();
+            IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
+            if (indexRoutingTable == null) {
+                iterator.remove();
+                continue;
+            }
+            IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
+            if (shardRoutingTable == null) {
+                iterator.remove();
+                continue;
+            }
+            if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
+                iterator.remove();
+            }
+        }
+    }
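
For orientation, the new cleanFailedShards above prunes the node-local bookkeeping of failed shards once the cluster state no longer carries the same allocation. The following is a minimal, self-contained sketch of the two ingredients it relies on; the Routing type and map keys are hypothetical stand-ins, and isSameAllocation here only loosely mirrors ShardRouting#isSameAllocation (which compares allocation ids):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class CleanFailedShardsSketch {
    // Hypothetical stand-in for ShardRouting; only the allocation id matters here.
    record Routing(String shardId, String allocationId) {
        // Loosely mirrors ShardRouting#isSameAllocation: same shard, same allocation id.
        boolean isSameAllocation(Routing other) {
            return shardId.equals(other.shardId) && allocationId.equals(other.allocationId);
        }
    }

    public static void main(String[] args) {
        Map<String, Routing> failedShards = new HashMap<>();
        failedShards.put("test[0]", new Routing("test[0]", "alloc-1"));
        failedShards.put("test[1]", new Routing("test[1]", "alloc-2"));

        // Pretend the routing table now carries a fresh allocation for test[0]
        // and still carries the very same allocation for test[1].
        Map<String, Routing> routingTable = Map.of(
            "test[0]", new Routing("test[0]", "alloc-9"),
            "test[1]", new Routing("test[1]", "alloc-2"));

        // An explicit Iterator allows removal while scanning, the same pattern the
        // real method uses to avoid a ConcurrentModificationException.
        for (Iterator<Map.Entry<String, Routing>> it = failedShards.entrySet().iterator(); it.hasNext(); ) {
            Map.Entry<String, Routing> entry = it.next();
            Routing current = routingTable.get(entry.getKey());
            if (current == null || !current.isSameAllocation(entry.getValue())) {
                it.remove(); // stale failure: that allocation no longer exists
            }
        }
        System.out.println(failedShards.keySet()); // [test[1]]
    }
}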
@@ -305,23 +295,39 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         }
     }

-    private void applyNewIndices(final ClusterChangedEvent event) {
-        // we only create indices for shards that are allocated
-        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
-        if (routingNode == null) {
-            return;
-        }
-        for (ShardRouting shard : routingNode) {
-            if (!indicesService.hasIndex(shard.index())) {
-                final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index());
-                if (logger.isDebugEnabled()) {
-                    logger.debug("[{}] creating index", indexMetaData.getIndex());
-                }
-                try {
-                    indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
-                } catch (Throwable e) {
-                    sendFailShard(shard, "failed to create index", e);
-                }
-            }
-        }
-    }
+    private void applyCleanedIndices(final ClusterChangedEvent event) {
+        // handle closed indices, since they are not allocated on a node once they are closed
+        // so applyDeletedIndices might not take them into account
+        for (IndexService indexService : indicesService) {
+            Index index = indexService.index();
+            IndexMetaData indexMetaData = event.state().metaData().index(index);
+            if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
+                for (Integer shardId : indexService.shardIds()) {
+                    logger.debug("{}[{}] removing shard (index is closed)", index, shardId);
+                    try {
+                        indexService.removeShard(shardId, "removing shard (index is closed)");
+                    } catch (Throwable e) {
+                        logger.warn("{} failed to remove shard (index is closed)", e, index);
+                    }
+                }
+            }
+        }
+
+        Set<Index> hasAllocations = new HashSet<>();
+        for (ShardRouting routing : event.state().getRoutingNodes().node(event.state().nodes().localNodeId())) {
+            hasAllocations.add(routing.index());
+        }
+        for (IndexService indexService : indicesService) {
+            Index index = indexService.index();
+            if (hasAllocations.contains(index) == false) {
+                assert indexService.shardIds().isEmpty() :
+                    "no locally assigned shards, but index wasn't emptied by applyDeletedShards."
+                        + " index " + index + ", shards: " + indexService.shardIds();
+                if (logger.isDebugEnabled()) {
+                    logger.debug("{} cleaning index (no shards allocated)", index);
+                }
+                // clean the index
+                removeIndex(index, "removing index (no shards allocated)");
+            }
+        }
+    }
@@ -349,6 +355,26 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         }
     }

+    private void applyNewIndices(final ClusterChangedEvent event) {
+        // we only create indices for shards that are allocated
+        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+        if (routingNode == null) {
+            return;
+        }
+        for (ShardRouting shard : routingNode) {
+            if (!indicesService.hasIndex(shard.index())) {
+                final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index());
+                if (logger.isDebugEnabled()) {
+                    logger.debug("[{}] creating index", indexMetaData.getIndex());
+                }
+                try {
+                    indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
+                } catch (Throwable e) {
+                    sendFailShard(shard, "failed to create index", e);
+                }
+            }
+        }
+    }
+
     private void applyMappings(ClusterChangedEvent event) {
         // go over and update mappings
@@ -503,33 +529,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         }
     }

-    private void cleanFailedShards(final ClusterChangedEvent event) {
-        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
-        if (routingNode == null) {
-            failedShards.clear();
-            return;
-        }
-        RoutingTable routingTable = event.state().routingTable();
-        for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
-            Map.Entry<ShardId, ShardRouting> entry = iterator.next();
-            ShardId failedShardId = entry.getKey();
-            ShardRouting failedShardRouting = entry.getValue();
-            IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
-            if (indexRoutingTable == null) {
-                iterator.remove();
-                continue;
-            }
-            IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
-            if (shardRoutingTable == null) {
-                iterator.remove();
-                continue;
-            }
-            if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
-                iterator.remove();
-            }
-        }
-    }
-
     private void applyInitializingShard(final ClusterState state, final IndexMetaData indexMetaData, IndexService indexService, final ShardRouting shardRouting) {
         final RoutingTable routingTable = state.routingTable();
         final DiscoveryNodes nodes = state.getNodes();


@@ -40,6 +40,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
     private static final int RELOCATION_COUNT = 25;

+    @TestLogging("_root:DEBUG,action.delete:TRACE,action.index:TRACE,index.shard:TRACE,cluster.service:TRACE")
     public void testPrimaryRelocationWhileIndexing() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3));
         client().admin().indices().prepareCreate("test")


@@ -1,8 +1,8 @@
 [[search-percolate]]
 == Percolator

-added[5.0.0,Percolator queries modifications aren't visible immediately and a refresh is required]
-added[5.0.0,Percolate and multi percolate APIs have been deprecated and has been replaced by <<query-dsl-percolator-query, the new `percolator` query>>]
-added[5.0.0,For indices created on or after version 5.0.0 the percolator automatically indexes the query terms with the percolator queries this allows the percolator to percolate documents quicker. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization]
+deprecated[5.0.0,Percolate and multi percolate APIs are deprecated and have been replaced by the new <<query-dsl-percolator-query,`percolator` query>>]
+added[5.0.0,Percolator query modifications only become visible after a refresh has occurred. Previously, they became visible immediately]
+added[5.0.0,For indices created on or after version 5.0.0-alpha1 the percolator automatically indexes the query terms with the percolator queries. This allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization]
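
For orientation, the replacement feature referenced above is the percolator query run through the regular search API. A hedged sketch of its request shape follows; the index, type, and field values here are made up for illustration, and the linked query-dsl-percolator-query page remains the authoritative syntax reference:

GET /my-percolator-index/_search
{
    "query" : {
        "percolate" : {
            "field" : "query",
            "document_type" : "doctype",
            "document" : {
                "message" : "A new bonsai tree in the office"
            }
        }
    }
}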


@@ -8,6 +8,7 @@
     "parts": {
       "repository": {
         "type" : "list",
+        "required": true,
         "description": "Name of repository from which to fetch the snapshot information"
       }
     },
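
Marking the part `"required": true` means generated clients and REST tests must always supply a repository in the URL path, so a call to this API always takes the form below (repository and snapshot names are placeholders):

GET /_snapshot/my_backup_repo/snapshot_1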


@@ -30,6 +30,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
 import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
@@ -1837,7 +1838,8 @@ public final class InternalTestCluster extends TestCluster {
         }
         NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
-        NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false, false);
+        CommonStatsFlags flags = new CommonStatsFlags(Flag.FieldData, Flag.QueryCache, Flag.Segments);
+        NodeStats stats = nodeService.stats(flags, false, false, false, false, false, false, false, false, false, false, false);
         assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
         assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
         assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));