added testcase for nodeRole

Noble Paul 2017-05-29 14:49:29 +09:30
parent b1ce4d3b88
commit 4638488ff9
4 changed files with 30 additions and 12 deletions

TestPolicyCloud.java

@@ -25,9 +25,13 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.SolrClientDataProvider;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.OverseerNodePrioritizer;
import org.apache.solr.cloud.OverseerTaskProcessor;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.rules.ExpectedException;
@@ -55,20 +59,30 @@ public class TestPolicyCloud extends SolrCloudTestCase {
}
public void testDataProvider() throws IOException, SolrServerException {
public void testDataProvider() throws IOException, SolrServerException, KeeperException, InterruptedException {
CollectionAdminRequest.createCollectionWithImplicitRouter("policiesTest", "conf", "shard1", 2)
.process(cluster.getSolrClient());
DocCollection rulesCollection = getCollectionState("policiesTest");
SolrClientDataProvider provider = new SolrClientDataProvider(cluster.getSolrClient());
Map<String, Object> val = provider.getNodeValues(rulesCollection.getReplicas().get(0).getNodeName(), Arrays.asList(
"freedisk",
"cores",
"heapUsage",
"sysLoadAvg"));
assertTrue(((Number) val.get("cores")).intValue() > 0);
assertTrue("freedisk value is "+((Number) val.get("freedisk")).longValue() , ((Number) val.get("freedisk")).longValue() > 0);
assertTrue("heapUsage value is "+((Number) val.get("heapUsage")).longValue() , ((Number) val.get("heapUsage")).longValue() > 0);
assertTrue("sysLoadAvg value is "+((Number) val.get("sysLoadAvg")).longValue() , ((Number) val.get("sysLoadAvg")).longValue() > 0);
assertTrue("freedisk value is " + ((Number) val.get("freedisk")).longValue(), ((Number) val.get("freedisk")).longValue() > 0);
assertTrue("heapUsage value is " + ((Number) val.get("heapUsage")).longValue(), ((Number) val.get("heapUsage")).longValue() > 0);
assertTrue("sysLoadAvg value is " + ((Number) val.get("sysLoadAvg")).longValue(), ((Number) val.get("sysLoadAvg")).longValue() > 0);
String overseerNode = OverseerTaskProcessor.getLeaderNode(cluster.getZkClient());
cluster.getSolrClient().request(CollectionAdminRequest.addRole(overseerNode, "overseer"));
for (int i = 0; i < 10; i++) {
Map<String, Object> data = cluster.getSolrClient().getZkStateReader().getZkClient().getJson(ZkStateReader.ROLES, true);
if (i >= 9 && data == null) {
throw new RuntimeException("NO overseer node created");
}
Thread.sleep(100);
}
val = provider.getNodeValues(overseerNode, Arrays.asList("nodeRole"));
assertEquals("overseer",val.get("nodeRole"));
}
}
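A note on the new assertions: ADDROLE writes the chosen node into /roles.json in ZooKeeper, and the loop polls for that file through the null-tolerant SolrZkClient.getJson() shown below. A tightened variant of the same wait, exiting as soon as the data appears, could look like this (hypothetical sketch, not part of the commit; it only reuses calls visible in the diff):

    // Hypothetical variant of the wait loop above, not part of the commit.
    // getJson() returning null means /roles.json has not been created yet.
    Map<String, Object> data = null;
    for (int i = 0; i < 10 && data == null; i++) {
      data = cluster.getSolrClient().getZkStateReader().getZkClient().getJson(ZkStateReader.ROLES, true);
      if (data == null) Thread.sleep(100);
    }
    if (data == null) throw new RuntimeException("NO overseer node created");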

SolrClientDataProvider.java

@@ -95,7 +95,7 @@ public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
AutoScalingSnitch snitch = new AutoScalingSnitch();
ClientSnitchCtx ctx = new ClientSnitchCtx(null, node, snitchSession, solrClient);
snitch.getRemoteInfo(node, new HashSet<>(tags), ctx);
snitch.getTags(node, new HashSet<>(tags), ctx);
nodeVsTags.put(node, ctx.getTags());
return ctx.getTags();
}
@@ -162,8 +162,6 @@ public class SolrClientDataProvider implements ClusterDataProvider, MapWriter {
//uses metrics API to get node information
static class AutoScalingSnitch extends ImplicitSnitch {
@Override
protected void getRemoteInfo(String solrNode, Set<String> requestedTags, SnitchContext ctx) {
ClientSnitchCtx snitchContext = (ClientSnitchCtx) ctx;
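The switch from getRemoteInfo() to getTags() in getNodeValues() is what makes the nodeRole tag reachable: ImplicitSnitch.getTags() resolves the locally derivable tags (role, nodeRole, the ip_* tags) and only then delegates to getRemoteInfo(), which AutoScalingSnitch overrides to fetch metric-backed values. Roughly (simplified sketch, not the verbatim upstream code):

    // Simplified sketch of the call flow after this change.
    snitch.getTags(node, new HashSet<>(tags), ctx);
    //   -> fillRole(...) populates "role"/"nodeRole" from /roles.json (cached in ctx)
    //   -> addIpTags(...) populates the ip_* tags
    //   -> getRemoteInfo(...) (overridden by AutoScalingSnitch) fetches freedisk,
    //      cores, heapUsage, sysLoadAvg via the metrics API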

SolrZkClient.java

@@ -364,7 +364,12 @@ public class SolrZkClient implements Closeable {
}
public Map<String, Object> getJson(String path, boolean retryOnConnLoss) throws KeeperException, InterruptedException {
byte[] bytes = getData(path, null, null, retryOnConnLoss);
byte[] bytes = null;
try {
bytes = getData(path, null, null, retryOnConnLoss);
} catch (KeeperException.NoNodeException e) {
return null;
}
if (bytes != null && bytes.length > 0) {
return (Map<String, Object>) Utils.fromJSON(bytes);
}
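For callers such as the polling loop in the test above, the effect is that a missing node now yields null instead of surfacing KeeperException.NoNodeException. A minimal usage sketch, assuming a connected SolrZkClient named zkClient:

    // Sketch only: treat null as "/roles.json not created yet" and retry later.
    Map<String, Object> roles = zkClient.getJson(ZkStateReader.ROLES, true);
    if (roles == null) {
      // Before this change the same call threw KeeperException.NoNodeException.
    }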

ImplicitSnitch.java

@@ -64,7 +64,8 @@ public class ImplicitSnitch extends Snitch {
Matcher hostAndPortMatcher = hostAndPortPattern.matcher(solrNode);
if (hostAndPortMatcher.find()) ctx.getTags().put(PORT, hostAndPortMatcher.group(2));
}
if (requestedTags.contains(ROLE) || requestedTags.contains(NODEROLE)) fillRole(solrNode, ctx);
if (requestedTags.contains(ROLE)) fillRole(solrNode, ctx, ROLE);
if (requestedTags.contains(NODEROLE)) fillRole(solrNode, ctx, NODEROLE);// for new policy framework
addIpTags(solrNode, requestedTags, ctx);
@@ -81,7 +82,7 @@ public class ImplicitSnitch extends Snitch {
if (params.size() > 0) ctx.invokeRemote(solrNode, params, "org.apache.solr.cloud.rule.ImplicitSnitch", null);
}
private void fillRole(String solrNode, SnitchContext ctx) {
private void fillRole(String solrNode, SnitchContext ctx, String key) {
Map roles = (Map) ctx.retrieve(ZkStateReader.ROLES); // we don't want to hit the ZK for each node
// so cache and reuse
if(roles == null) roles = ctx.getZkJson(ZkStateReader.ROLES);
@@ -91,7 +92,7 @@ public class ImplicitSnitch extends Snitch {
Map.Entry e = (Map.Entry) o;
if (e.getValue() instanceof List) {
if(((List) e.getValue()).contains(solrNode)) {
ctx.getTags().put(ROLE, e.getKey());
ctx.getTags().put(key, e.getKey());
break;
}
}
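To illustrate what the parameterized fillRole() now produces, here is the mapping with made-up data (illustrative sketch only, not upstream code): given a /roles.json of {"overseer": ["127.0.0.1:8983_solr"]}, the two calls tag the matching node for both frameworks.

    // Illustrative sketch with a fabricated node name; mirrors the loop above.
    Map<String, Object> roles = new HashMap<>();
    roles.put("overseer", Arrays.asList("127.0.0.1:8983_solr"));
    Map<String, Object> tags = new HashMap<>();
    String solrNode = "127.0.0.1:8983_solr";
    for (Map.Entry<String, Object> e : roles.entrySet()) {
      if (e.getValue() instanceof List && ((List) e.getValue()).contains(solrNode)) {
        tags.put("role", e.getKey());      // read by the existing snitch/rule syntax
        tags.put("nodeRole", e.getKey());  // read by the new policy framework
      }
    }
    // tags now holds {role=overseer, nodeRole=overseer}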