mirror of https://github.com/apache/lucene.git
SOLR-12866: Turn TestHdfsCloudBackupRestore ON. No changes yet.
This commit is contained in:
parent e0e5296abc
commit 4589bbe47b

@@ -26,8 +26,6 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;

@@ -65,7 +63,6 @@ import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
@ThreadLeakFilters(defaultFilters = true, filters = {
    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
})
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12866")
public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
  public static final String SOLR_XML = "<solr>\n" +
      "\n" +
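
Given the commit message and the shrinking line counts in the two hunks above, the @AwaitsFix annotation and its import appear to be the lines being removed. In the Lucene/Solr test framework, a class annotated with LuceneTestCase.AwaitsFix is skipped by the test runner until the linked issue is resolved, so deleting the annotation is what turns the suite back on. A minimal sketch of the pattern (the test class name here is made up for illustration):

import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.apache.solr.SolrTestCaseJ4;

// Hypothetical example: while the annotation is present, the whole class is
// skipped and reported as awaiting a fix for the linked JIRA issue.
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12866")
public class SomeCurrentlyDisabledTest extends SolrTestCaseJ4 {
  public void testSomething() {
    // does not run until @AwaitsFix is removed
  }
}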

@@ -0,0 +1,27 @@
{"clusterstate":{"test_empty_collection":{
    "pullReplicas":0,
    "replicationFactor":1,
    "shards":{
      "shard2":{
        "range":"0-7fffffff",
        "state":"active",
        "replicas":{}},
      "shard1_1":{
        "range":"c0000000-ffffffff",
        "state":"active",
        "replicas":{},
        "stateTimestamp":"1540603288721383849"},
      "shard1_0":{
        "range":"80000000-bfffffff",
        "state":"active",
        "replicas":{},
        "stateTimestamp":"1540603288721468797"}},
    "router":{
      "name":"compositeId",
      "field":"shard_s"},
    "maxShardsPerNode":"-1",
    "autoAddReplicas":"false",
    "nrtReplicas":1,
    "tlogReplicas":0}},

  "replicaInfo":{}}
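
The three hash ranges in this fixture cover the compositeId router's 32-bit hash ring: shard2 takes the lower half, while shard1_0 and shard1_1 (naming that suggests a split shard1) take the two quarters of the upper half. A standalone sanity sketch, not part of the patch, that checks the ranges are contiguous and cover the whole ring when the hex bounds are read as unsigned 32-bit values:

import java.util.Arrays;
import java.util.Comparator;

// Sanity sketch only: verify the fixture's shard hash ranges partition the
// full 32-bit ring with no gaps, reading the hex bounds as unsigned values.
public class ShardRangeCheck {
  public static void main(String[] args) {
    long[][] ranges = {
        {0x00000000L, 0x7fffffffL}, // shard2
        {0xc0000000L, 0xffffffffL}, // shard1_1
        {0x80000000L, 0xbfffffffL}, // shard1_0
    };
    Arrays.sort(ranges, Comparator.comparingLong((long[] r) -> r[0]));
    boolean contiguous = true;
    for (int i = 1; i < ranges.length; i++) {
      // each range must start exactly one past the end of the previous one
      contiguous &= ranges[i][0] == ranges[i - 1][1] + 1;
    }
    boolean fullRing = ranges[0][0] == 0x00000000L
        && ranges[ranges.length - 1][1] == 0xffffffffL;
    System.out.println(contiguous && fullRing); // prints: true
  }
}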

@@ -2938,6 +2938,74 @@ public class TestPolicy extends SolrTestCaseJ4 {
    assertEquals("s1", replicaPosition.shard); // sanity check
  }

  public void testPolicyForEmptyCollection() throws IOException, InterruptedException {
    Map m = (Map) loadFromResource("testEmptyCollection.json");
    Map clusterStateMap = (Map) m.remove("clusterstate");
    Map replicaInfoMap = (Map) m.remove("replicaInfo");

    ClusterState clusterState = ClusterState.load(1, clusterStateMap, ImmutableSet.of("node1", "node2"), CLUSTER_STATE);

    List<String> shards = Arrays.asList("shard1", "shard2", "shard3");

    Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
        .forCollection("test_empty_collection")
        .forShard(shards)
        .assignNrtReplicas(1)
        .build();

    DelegatingCloudManager cloudManager = new DelegatingCloudManager(null) {
      @Override
      public ClusterStateProvider getClusterStateProvider() {
        return new DelegatingClusterStateProvider(null) {
          @Override
          public ClusterState getClusterState() {
            return clusterState;
          }

          @Override
          public Set<String> getLiveNodes() {
            return clusterState.getLiveNodes();
          }
        };
      }

      @Override
      public DistribStateManager getDistribStateManager() {
        return new DelegatingDistribStateManager(null) {
          @Override
          public AutoScalingConfig getAutoScalingConfig() {
            return new AutoScalingConfig(new HashMap<>());
          }
        };
      }

      public NodeStateProvider getNodeStateProvider() {
        return new DelegatingNodeStateProvider(null) {
          @Override
          public Map<String, Object> getNodeValues(String node, Collection<String> keys) {
            return Collections.EMPTY_MAP;
          }

          @Override
          public Map<String, Map<String, List<ReplicaInfo>>> getReplicaInfo(String node, Collection<String> keys) {
            //return Collections.EMPTY_MAP;
            return replicaInfoMap;
          }
        };
      }

    };

    Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(cloudManager);
    ClusterState state = cloudManager.getClusterStateProvider().getClusterState();
    DocCollection collection = state.getCollection("test_empty_collection");

    Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(state, collection);
    List<ReplicaPosition> replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
    assertEquals(2, replicaPositions.stream().map((rp) -> rp.node).distinct().count());
    assertEquals(3, replicaPositions.stream().map((rp) -> rp.shard).distinct().count());
  }

  /**
   * Tests that an empty policy should not persist implicitly added keys to MapWriter
   * <p>
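
The two assertions at the end of the new test use a stream distinct-count idiom: with two live nodes and three requested shards, every shard should receive a position and both nodes should be used. A tiny self-contained illustration of that idiom, with invented (node, shard) pairs:

import java.util.Arrays;
import java.util.List;

// Invented data, purely to illustrate the distinct-count checks used above.
public class DistinctCountDemo {
  public static void main(String[] args) {
    List<String[]> positions = Arrays.asList(   // {node, shard} pairs
        new String[]{"node1", "shard1"},
        new String[]{"node2", "shard2"},
        new String[]{"node1", "shard3"});
    System.out.println(positions.stream().map(p -> p[0]).distinct().count()); // 2 distinct nodes
    System.out.println(positions.stream().map(p -> p[1]).distinct().count()); // 3 distinct shards
  }
}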