SOLR-13210: Fix TriLevelCompositeIdRoutingTest to actually make sense

Chris Hostetter 2019-02-06 14:42:30 -07:00
parent ea2956fda3
commit 87ad59f826
1 changed file with 74 additions and 85 deletions
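For context on what the rewritten test asserts: Solr's compositeId router hashes the route prefix embedded in the uniqueKey, so a tri-level id of the form app!user!doc (optionally with a /bits mask on the app component, e.g. app7/2!user13!doc5) must land on the same shard as every other id that shares its route prefix. The following sketch is illustrative only and not part of this commit; it mirrors how the test below builds ids and recovers the route prefix with lastIndexOf('!').

    import java.util.Random;

    /** Illustrative only: builds tri-level composite ids the way the test does. */
    public class CompositeIdExample {
      public static void main(String[] args) {
        Random r = new Random();
        int appId  = r.nextInt(5) + 1;   // analogous to MAX_APP_ID
        int userId = r.nextInt(10) + 1;  // analogous to MAX_USER_ID
        int docId  = r.nextInt(20);      // analogous to MAX_DOC_ID
        // roughly half the time use no bit mask, otherwise 1-16 bits for the app component
        int bitMask = Math.max(0, r.nextInt(32) - 15);

        String id = "app" + appId + (bitMask <= 0 ? "" : ("/" + bitMask))
            + "!" + "user" + userId
            + "!" + "doc" + docId;

        // the route prefix is everything up to the last '!'; all ids sharing it
        // must be routed to the same shard by the compositeId router
        String routePrefix = id.substring(0, id.lastIndexOf('!'));
        System.out.println(id + " -> route prefix: " + routePrefix);
      }
    }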

@@ -16,6 +16,13 @@
  */
 package org.apache.solr.cloud;
 
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
@@ -24,18 +31,15 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
-
 public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  final int NUM_APPS;
-  final int NUM_USERS;
-  final int NUM_DOCS;
+  final int MAX_APP_ID;
+  final int MAX_USER_ID;
+  final int MAX_DOC_ID;
+  final int NUM_ADDS;
 
   @BeforeClass
@@ -51,14 +55,14 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
   public TriLevelCompositeIdRoutingTest() {
     schemaString = "schema15.xml";      // we need a string id
     sliceCount = TestUtil.nextInt(random(), 1, (TEST_NIGHTLY ? 5 : 3)); // this is the number of *SHARDS*
     int replicationFactor = rarely() ? 2 : 1;  // replication is not the focus of this test
     fixShardCount(replicationFactor * sliceCount); // total num cores, one per node
-    NUM_APPS = atLeast(5);
-    NUM_USERS = atLeast(10);
-    NUM_DOCS = atLeast(100);
+    MAX_APP_ID = atLeast(5);
+    MAX_USER_ID = atLeast(10);
+    MAX_DOC_ID = atLeast(20);
+    NUM_ADDS = atLeast(200);
   }
 
   @Test
@@ -71,11 +75,57 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
       // todo: do I have to do this here?
       waitForRecoveriesToFinish(true);
 
-      doTriLevelHashingTest();
-      del("*:*");
+      // NOTE: we might randomly generate the same uniqueKey value multiple times,
+      // (which is a valid test case, they should route to the same shard both times)
+      // so we track each expectedId in a set for later sanity checking
+      final Set<String> expectedUniqueKeys = new HashSet<>();
+      for (int i = 0; i < NUM_ADDS; i++) {
+        final int appId = r.nextInt(MAX_APP_ID) + 1;
+        final int userId = r.nextInt(MAX_USER_ID) + 1;
+        // skew the odds so half the time we have no mask, and half the time we
+        // have an even distribution of 1-16 bits
+        final int bitMask = Math.max(0, r.nextInt(32)-15);
+        String id = "app" + appId + (bitMask <= 0 ? "" : ("/" + bitMask))
+          + "!" + "user" + userId
+          + "!" + "doc" + r.nextInt(MAX_DOC_ID);
+        doAddDoc(id);
+        expectedUniqueKeys.add(id);
+      }
       commit();
-      doTriLevelHashingTestWithBitMask();
+
+      final Map<String, String> routePrefixMap = new HashMap<>();
+      final Set<String> actualUniqueKeys = new HashSet<>();
+      for (int i = 1; i <= sliceCount; i++) {
+        final String shardId = "shard" + i;
+        final Set<String> uniqueKeysInShard = fetchUniqueKeysFromShard(shardId);
+
+        { // sanity check our uniqueKey values aren't duplicated across shards
+          final Set<String> uniqueKeysOnDuplicateShards = new HashSet<>(uniqueKeysInShard);
+          uniqueKeysOnDuplicateShards.retainAll(actualUniqueKeys);
+          assertEquals(shardId + " contains some uniqueKeys that were already found on a previous shard",
+                       Collections.emptySet(), uniqueKeysOnDuplicateShards);
+          actualUniqueKeys.addAll(uniqueKeysInShard);
+        }
+
+        // foreach uniqueKey, extract it's route prefix and confirm those aren't spread across multiple shards
+        for (String uniqueKey : uniqueKeysInShard) {
+          final String routePrefix = uniqueKey.substring(0, uniqueKey.lastIndexOf('!'));
+          log.debug("shard( {} ) : uniqueKey( {} ) -> routePrefix( {} )", shardId, uniqueKey, routePrefix);
+          assertNotNull("null prefix WTF? " + uniqueKey, routePrefix);
+          final String otherShard = routePrefixMap.put(routePrefix, shardId);
+          if (null != otherShard)
+            // if we already had a mapping, make sure it's an earlier doc from our current shard...
+            assertEquals("routePrefix " + routePrefix + " found in multiple shards",
+                         shardId, otherShard);
+        }
+      }
+      assertEquals("Docs missing?", expectedUniqueKeys.size(), actualUniqueKeys.size());
+
       testFinished = true;
     } finally {
       if (!testFinished) {
@@ -83,82 +133,21 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
       }
     }
   }
 
-  private void doTriLevelHashingTest() throws Exception {
-    log.info("### STARTING doTriLevelHashingTest");
-    // for now, we know how ranges will be distributed to shards.
-    // may have to look it up in clusterstate if that assumption changes.
-
-    for (int i = 0; i < NUM_DOCS; i++) {
-      int appId = r.nextInt(NUM_APPS) + 1;
-      int userId = r.nextInt(NUM_USERS) + 1;
-
-      String id = "app" + appId + "!" + "user" + userId + "!" + "doc" + r.nextInt(100);
-      doAddDoc(id);
-    }
-
-    commit();
-
-    HashMap<String, Integer> idMap = new HashMap<>();
-
-    for (int i = 1; i <= sliceCount; i++) {
-      Set<String> ids = doQueryGetUniqueIdKeys("q", "*:*", "rows", ""+NUM_DOCS, "shards", "shard" + i);
-      for (String id : ids) {
-        assertFalse("Found the same route key [" + id + "] in 2 shards.", idMap.containsKey(id));
-        idMap.put(getKey(id), i);
-      }
-    }
-  }
-
-  private void doTriLevelHashingTestWithBitMask() throws Exception {
-    log.info("### STARTING doTriLevelHashingTestWithBitMask");
-    // for now, we know how ranges will be distributed to shards.
-    // may have to look it up in clusterstate if that assumption changes.
-
-    for (int i = 0; i < NUM_DOCS; i++) {
-      int appId = r.nextInt(NUM_APPS) + 1;
-      int userId = r.nextInt(NUM_USERS) + 1;
-      int bitMask = r.nextInt(16) + 1;
-
-      String id = "app" + appId + "/" + bitMask + "!" + "user" + userId + "!" + "doc" + r.nextInt(100);
-      doAddDoc(id);
-    }
-
-    commit();
-
-    HashMap<String, Integer> idMap = new HashMap<>();
-
-    for (int i = 1; i <= sliceCount; i++) {
-      Set<String> ids = doQueryGetUniqueIdKeys("q", "*:*", "rows", ""+NUM_DOCS, "shards", "shard" + i);
-      for (String id : ids) {
-        assertFalse("Found the same route key [" + id + "] in 2 shards.", idMap.containsKey(id));
-        idMap.put(getKey(id), i);
-      }
-    }
-  }
-
   void doAddDoc(String id) throws Exception {
     index("id", id);
     // todo - target diff servers and use cloud clients as well as non-cloud clients
   }
 
-  Set<String> doQueryGetUniqueIdKeys(String... queryParams) throws Exception {
-    QueryResponse rsp = cloudClient.query(params(queryParams));
-    Set<String> obtainedIdKeys = new HashSet<>();
-    for (SolrDocument doc : rsp.getResults()) {
-      obtainedIdKeys.add(getKey((String) doc.get("id")));
-    }
-    return obtainedIdKeys;
-  }
-
-  private String getKey(String id) {
-    return id.substring(0, id.lastIndexOf('!'));
-  }
+  private Set<String> fetchUniqueKeysFromShard(final String shardId) throws Exception {
+    // NUM_ADDS is an absolute upper bound on the num docs in the index
+    QueryResponse rsp = cloudClient.query(params("q", "*:*", "rows", ""+NUM_ADDS, "shards", shardId));
+    Set<String> uniqueKeys = new HashSet<>();
+    for (SolrDocument doc : rsp.getResults()) {
+      final String id = (String) doc.get("id");
+      assertNotNull("null id WTF? " + doc.toString(), id);
+      uniqueKeys.add(id);
+    }
+    return uniqueKeys;
+  }
 }
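As a standalone illustration of the per-shard fetch that the new fetchUniqueKeysFromShard helper performs inside the test framework, the SolrJ sketch below restricts a query to one logical shard via the shards parameter and collects the ids found there. The ZooKeeper address (localhost:9983) and collection name (routing_test) are hypothetical placeholders, not anything referenced by this commit.

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Optional;
    import java.util.Set;

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.response.QueryResponse;
    import org.apache.solr.common.SolrDocument;

    public class PerShardQueryExample {
      public static void main(String[] args) throws Exception {
        // hypothetical ZooKeeper address and collection name, for illustration only
        try (CloudSolrClient client = new CloudSolrClient.Builder(
                 Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          client.setDefaultCollection("routing_test");

          SolrQuery q = new SolrQuery("*:*");
          q.setRows(1000);            // an upper bound on the docs expected in one shard
          q.set("shards", "shard1");  // restrict the query to a single logical shard

          QueryResponse rsp = client.query(q);
          Set<String> uniqueKeys = new HashSet<>();
          for (SolrDocument doc : rsp.getResults()) {
            uniqueKeys.add((String) doc.get("id"));
          }
          System.out.println("shard1 uniqueKeys: " + uniqueKeys);
        }
      }
    }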