SOLR-12555: Use `expectThrows` for expected exceptions

This commit replaces the `try { doX(); fail(); }` pattern with the
`expectThrows` test helper, which was created for this purpose.  This
commit makes these changes in the core package: `o.a.solr.cloud`.

Closes #425
Jason Gerlowski 2018-08-09 15:42:17 -04:00
parent 63fc1246f7
commit 00aeb64c10
27 changed files with 448 additions and 573 deletions
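
For reference, the before/after shape of the change is sketched below. This is an illustrative example only, not code from the patch: the class and the doFailingUpdate() helper are hypothetical, while expectThrows is the existing LuceneTestCase helper (also available in SolrTestCaseJ4 subclasses) that runs the lambda, fails the test if no exception of the expected type is thrown, and returns the caught exception so further assertions can be made on it.

import static org.apache.lucene.util.LuceneTestCase.expectThrows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import org.apache.solr.common.SolrException;

public class ExpectThrowsPatternSketch {

  // Old pattern: call the failing code, fail() if it returns, assert inside the catch block.
  void oldPattern() {
    try {
      doFailingUpdate();
      fail("Expected a SolrException");
    } catch (SolrException e) {
      assertEquals(400, e.code());
    }
  }

  // New pattern: expectThrows asserts the exception type and returns the exception.
  void newPattern() {
    SolrException e = expectThrows(SolrException.class,
        "Expected a SolrException",          // reported if nothing is thrown
        () -> doFailingUpdate());
    assertEquals(400, e.code());
  }

  // Hypothetical helper standing in for the update/query calls changed in this commit.
  private void doFailingUpdate() {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "bad value");
  }
}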

View File

@@ -356,42 +356,39 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 {
// test that malformed numerics cause client error not server error
for (String field : FIELDS) {
try {
h.update(add( doc("id","100", field, BAD_VALUE)));
fail("Didn't encounter an error trying to add a bad date: " + field);
} catch (SolrException e) {
String msg = e.toString();
assertTrue("not an (update) client error on field: " + field +" : "+ msg,
400 <= e.code() && e.code() < 500);
assertTrue("(update) client error does not mention bad value: " + msg,
msg.contains(BAD_VALUE));
assertTrue("client error does not mention document id: " + msg,
msg.contains("[doc=100]"));
}
SolrException e1 = expectThrows(SolrException.class,
"Didn't encounter an error trying to add a bad date: " + field,
() -> h.update(add( doc("id","100", field, BAD_VALUE))));
String msg1 = e1.getMessage();
assertTrue("not an (update) client error on field: " + field +" : "+ msg1,
400 <= e1.code() && e1.code() < 500);
assertTrue("(update) client error does not mention bad value: " + msg1,
msg1.contains(BAD_VALUE));
assertTrue("client error does not mention document id: " + msg1,
msg1.contains("[doc=100]"));
SchemaField sf = h.getCore().getLatestSchema().getField(field);
if (!sf.hasDocValues() && !sf.indexed()) {
continue;
}
try {
h.query(req("q",field + ":" + BAD_VALUE));
fail("Didn't encounter an error trying to query a bad date: " + field);
} catch (SolrException e) {
String msg = e.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg,
400 <= e.code() && e.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg,
msg.contains(BAD_VALUE));
}
try {
h.query(req("q",field + ":[NOW TO " + BAD_VALUE + "]"));
fail("Didn't encounter an error trying to query a bad date: " + field);
} catch (SolrException e) {
String msg = e.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg,
400 <= e.code() && e.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg,
msg.contains(BAD_VALUE));
}
SolrException e2 = expectThrows(SolrException.class,
"Didn't encounter an error trying to add a bad date: " + field,
() -> h.query(req("q",field + ":" + BAD_VALUE))
);
String msg2 = e2.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg2,
400 <= e2.code() && e2.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg2,
msg2.contains(BAD_VALUE));
SolrException e3 = expectThrows(SolrException.class,
"Didn't encounter an error trying to add a bad date: " + field,
() -> h.query(req("q",field + ":[NOW TO " + BAD_VALUE + "]"))
);
String msg3 = e3.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg3,
400 <= e3.code() && e3.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg3,
msg3.contains(BAD_VALUE));
}
}
@@ -414,42 +411,40 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 {
// test that malformed numerics cause client error not server error
for (String field : FIELDS) {
try {
h.update(add( doc("id","100", field, BAD_VALUE)));
fail("Didn't encounter an error trying to add a non-number: " + field);
} catch (SolrException e) {
String msg = e.toString();
assertTrue("not an (update) client error on field: " + field +" : "+ msg,
400 <= e.code() && e.code() < 500);
assertTrue("(update) client error does not mention bad value: " + msg,
msg.contains(BAD_VALUE));
SolrException e1 = expectThrows(SolrException.class,
"Didn't encounter an error trying to add a non-number: " + field,
() -> h.update(add( doc("id","100", field, BAD_VALUE))));
String msg1 = e1.toString();
assertTrue("not an (update) client error on field: " + field +" : "+ msg1,
400 <= e1.code() && e1.code() < 500);
assertTrue("(update) client error does not mention bad value: " + msg1,
msg1.contains(BAD_VALUE));
assertTrue("client error does not mention document id",
msg.contains("[doc=100]"));
}
msg1.contains("[doc=100]"));
SchemaField sf = h.getCore().getLatestSchema().getField(field);
if (!sf.hasDocValues() && !sf.indexed()) {
continue;
}
try {
h.query(req("q",field + ":" + BAD_VALUE));
fail("Didn't encounter an error trying to query a non-number: " + field);
} catch (SolrException e) {
String msg = e.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg,
400 <= e.code() && e.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg,
msg.contains(BAD_VALUE));
}
try {
h.query(req("q",field + ":[10 TO " + BAD_VALUE + "]"));
fail("Didn't encounter an error trying to query a non-number: " + field);
} catch (SolrException e) {
String msg = e.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg,
400 <= e.code() && e.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg,
msg.contains(BAD_VALUE));
}
SolrException e2 = expectThrows(SolrException.class,
"Didn't encounter an error trying to add a non-number: " + field,
() -> h.query(req("q",field + ":" + BAD_VALUE))
);
String msg2 = e2.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg2,
400 <= e2.code() && e2.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg2,
msg2.contains(BAD_VALUE));
SolrException e3 = expectThrows(SolrException.class,
"Didn't encounter an error trying to add a non-number: " + field,
() -> h.query(req("q",field + ":[10 TO " + BAD_VALUE + "]"))
);
String msg3 = e3.toString();
assertTrue("not a (search) client error on field: " + field +" : "+ msg3,
400 <= e3.code() && e3.code() < 500);
assertTrue("(search) client error does not mention bad value: " + msg3,
msg3.contains(BAD_VALUE));
}
}
@@ -1002,16 +997,14 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 {
assertU(commit());
try {
RuntimeException outerEx = expectThrows(RuntimeException.class, () -> {
ignoreException("can not sort on multivalued field: sortabuse_t");
assertQ("sort on something that shouldn't work",
req("q", "sortabuse_b:true",
"sort", "sortabuse_t asc"),
"*[count(//doc)=2]");
fail("no error encountered when sorting on sortabuse_t");
} catch (Exception outer) {
// EXPECTED
Throwable root = getRootCause(outer);
});
Throwable root = getRootCause(outerEx);
assertEquals("sort exception root cause",
SolrException.class, root.getClass());
SolrException e = (SolrException) root;
@@ -1019,8 +1012,7 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 {
SolrException.ErrorCode.BAD_REQUEST,
SolrException.ErrorCode.getErrorCode(e.code()));
assertTrue("exception doesn't contain field name",
-1 != e.getMessage().indexOf("sortabuse_t"));
}
e.getMessage().contains("sortabuse_t"));
}
// /** this doesn't work, but if it did, this is how we'd test it. */

View File

@@ -842,10 +842,10 @@ public class CursorPagingTest extends SolrTestCaseJ4 {
throws Exception {
try {
SolrException e = expectThrows(SolrException.class, () -> {
ignoreException(expSubstr);
assertJQ(req(p));
fail("no exception matching expected: " + expCode.code + ": " + expSubstr);
} catch (SolrException e) {
});
assertEquals(expCode.code, e.code());
assertTrue("Expected substr not found: " + expSubstr + " <!< " + e.getMessage(),
e.getMessage().contains(expSubstr));

View File

@@ -53,11 +53,12 @@ public class TestCursorMarkWithoutUniqueKey extends SolrTestCaseJ4 {
try {
ignoreException("Cursor functionality is not available unless the IndexSchema defines a uniqueKey field");
assertQ(req("q", "*:*", "sort", "fld desc", "cursorMark", CURSOR_MARK_START));
fail("No exception when querying with a cursorMark with no uniqueKey defined.");
} catch (Exception e) {
expectThrows(RuntimeException.class,
"No exception when querying with a cursorMark with no uniqueKey defined.",
() -> assertQ(req("q", "*:*", "sort", "fld desc", "cursorMark", CURSOR_MARK_START))
);
} finally {
unIgnoreException("Cursor functionality is not available unless the IndexSchema defines a uniqueKey field");
}
}
}

View File

@@ -911,13 +911,11 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
//SOLR 3161 ensure shards.qt=/update fails (anything but search handler really)
// Also see TestRemoteStreaming#testQtUpdateFails()
try {
ignoreException("isShard is only acceptable");
//SolrException e = expectThrows(SolrException.class, () -> {
// ignoreException("isShard is only acceptable");
// query("q","*:*","shards.qt","/update","stream.body","<delete><query>*:*</query></delete>");
// fail();
} catch (SolrException e) {
//expected
}
//});
unIgnoreException("isShard is only acceptable");
// test debugging
@@ -1219,41 +1217,34 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
private void validateCommonQueryParameters() throws Exception {
ignoreException("parameter cannot be negative");
try {
SolrException e1 = expectThrows(SolrException.class, () -> {
SolrQuery query = new SolrQuery();
query.setParam("start", "non_numeric_value").setQuery("*");
QueryResponse resp = query(query);
fail("Expected the last query to fail, but got response: " + resp);
} catch (SolrException e) {
assertEquals(ErrorCode.BAD_REQUEST.code, e.code());
}
});
assertEquals(ErrorCode.BAD_REQUEST.code, e1.code());
try {
SolrException e2 = expectThrows(SolrException.class, () -> {
SolrQuery query = new SolrQuery();
query.setStart(-1).setQuery("*");
QueryResponse resp = query(query);
fail("Expected the last query to fail, but got response: " + resp);
} catch (SolrException e) {
assertEquals(ErrorCode.BAD_REQUEST.code, e.code());
}
});
assertEquals(ErrorCode.BAD_REQUEST.code, e2.code());
try {
SolrException e3 = expectThrows(SolrException.class, () -> {
SolrQuery query = new SolrQuery();
query.setRows(-1).setStart(0).setQuery("*");
QueryResponse resp = query(query);
fail("Expected the last query to fail, but got response: " + resp);
} catch (SolrException e) {
assertEquals(ErrorCode.BAD_REQUEST.code, e.code());
}
});
assertEquals(ErrorCode.BAD_REQUEST.code, e3.code());
try {
SolrException e4 = expectThrows(SolrException.class, () -> {
SolrQuery query = new SolrQuery();
query.setParam("rows", "non_numeric_value").setQuery("*");
QueryResponse resp = query(query);
fail("Expected the last query to fail, but got response: " + resp);
} catch (SolrException e) {
assertEquals(ErrorCode.BAD_REQUEST.code, e.code());
}
});
assertEquals(ErrorCode.BAD_REQUEST.code, e4.code());
resetExceptionIgnores();
}
}

View File

@@ -131,12 +131,9 @@ public class TestTolerantSearch extends SolrJettyTestBase {
query.setFacet(true);
ignoreException("Dummy exception in BadResponseWriter");
try {
collection1.query(query);
fail("Should get an exception");
} catch (Exception e) {
//expected
}
expectThrows(SolrException.class, () -> collection1.query(query));
query.set(ShardParams.SHARDS_TOLERANT, "true");
QueryResponse response = collection1.query(query);
assertTrue(response.getResponseHeader().getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
@@ -179,12 +176,9 @@ public class TestTolerantSearch extends SolrJettyTestBase {
query.setFacet(true);
ignoreException("Dummy exception in BadResponseWriter");
try {
collection1.query(query);
fail("Should get an exception");
} catch (Exception e) {
//expected
}
expectThrows(Exception.class, () -> collection1.query(query));
query.set(ShardParams.SHARDS_TOLERANT, "true");
QueryResponse response = collection1.query(query);
assertTrue(response.getResponseHeader().getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));

View File

@@ -116,12 +116,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
long docId = testUpdateAndDelete();
// index a bad doc...
try {
indexr(t1, "a doc with no id");
fail("this should fail");
} catch (SolrException e) {
// expected
}
expectThrows(SolrException.class, () -> indexr(t1, "a doc with no id"));
// TODO: bring this to its own method?
// try indexing to a leader that has no replicas up
@@ -271,13 +266,10 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
// ensure shard is dead
try {
index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1,
"specific doc!");
fail("This server should be down and this update should have failed");
} catch (SolrServerException e) {
// expected..
}
expectThrows(SolrServerException.class,
"This server should be down and this update should have failed",
() -> index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1, "specific doc!")
);
commit();

View File

@@ -713,24 +713,20 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
log.info("### STARTING doOptimisticLockingAndUpdating");
printLayout();
SolrInputDocument sd = sdoc("id", 1000, "_version_", -1);
final SolrInputDocument sd = sdoc("id", 1000, "_version_", -1);
indexDoc(sd);
ignoreException("version conflict");
for (SolrClient client : clients) {
try {
client.add(sd);
fail();
} catch (SolrException e) {
SolrException e = expectThrows(SolrException.class, () -> client.add(sd));
assertEquals(409, e.code());
}
}
unIgnoreException("version conflict");
// TODO: test deletes. SolrJ needs a good way to pass version for delete...
sd = sdoc("id", 1000, "foo_i",5);
clients.get(0).add(sd);
final SolrInputDocument sd2 = sdoc("id", 1000, "foo_i",5);
clients.get(0).add(sd2);
List<Integer> expected = new ArrayList<>();
int val = 0;

View File

@@ -150,15 +150,15 @@ public class BasicZkTest extends AbstractZkTestCase {
zkController.getZkClient().setData("/configs/conf1/solrconfig.xml", new byte[0], true);
// we set the solrconfig to nothing, so this reload should fail
try {
SolrException e = expectThrows(SolrException.class,
"The reloaded SolrCore did not pick up configs from zookeeper",
() -> {
ignoreException("solrconfig.xml");
h.getCoreContainer().reload(h.getCore().getName());
fail("The reloaded SolrCore did not pick up configs from zookeeper");
} catch(SolrException e) {
});
resetExceptionIgnores();
assertTrue(e.getMessage().contains("Unable to reload core [collection1]"));
assertTrue(e.getCause().getMessage().contains("Error loading solr config from solrconfig.xml"));
}
// test stats call
Map<String, Metric> metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics();

View File

@@ -514,31 +514,25 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
waitForState("Expecting attribute 'maxShardsPerNode' to be deleted", collection,
(n, c) -> null == c.get("maxShardsPerNode"));
try {
CollectionAdminRequest.modifyCollection(collection, null)
expectThrows(IllegalArgumentException.class,
"An attempt to set unknown collection attribute should have failed",
() -> CollectionAdminRequest.modifyCollection(collection, null)
.setAttribute("non_existent_attr", 25)
.process(cluster.getSolrClient());
fail("An attempt to set unknown collection attribute should have failed");
} catch (IllegalArgumentException e) {
// expected
}
.process(cluster.getSolrClient())
);
try {
CollectionAdminRequest.modifyCollection(collection, null)
expectThrows(IllegalArgumentException.class,
"An attempt to set null value should have failed",
() -> CollectionAdminRequest.modifyCollection(collection, null)
.setAttribute("non_existent_attr", null)
.process(cluster.getSolrClient());
fail("An attempt to set null value should have failed");
} catch (IllegalArgumentException e) {
// expected
}
.process(cluster.getSolrClient())
);
try {
CollectionAdminRequest.modifyCollection(collection, null)
expectThrows(IllegalArgumentException.class,
"An attempt to unset unknown collection attribute should have failed",
() -> CollectionAdminRequest.modifyCollection(collection, null)
.unsetAttribute("non_existent_attr")
.process(cluster.getSolrClient());
fail("An attempt to unset unknown collection attribute should have failed");
} catch (IllegalArgumentException e) {
// expected
}
.process(cluster.getSolrClient())
);
}
}

View File

@@ -145,19 +145,15 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
CollectionAdminRequest.deleteReplicasFromShard(collectionName, "shard1", 2).process(cluster.getSolrClient());
waitForState("Expected a single shard with a single replica", collectionName, clusterShape(1, 1));
try {
CollectionAdminRequest.deleteReplicasFromShard(collectionName, "shard1", 1).process(cluster.getSolrClient());
fail("Expected Exception, Can't delete the last replica by count");
} catch (SolrException e) {
// expected
SolrException e = expectThrows(SolrException.class,
"Can't delete the last replica by count",
() -> CollectionAdminRequest.deleteReplicasFromShard(collectionName, "shard1", 1).process(cluster.getSolrClient())
);
assertEquals(SolrException.ErrorCode.BAD_REQUEST.code, e.code());
assertTrue(e.getMessage().contains("There is only one replica available"));
}
DocCollection docCollection = getCollectionState(collectionName);
// We know that since leaders are preserved, PULL replicas should not be left alone in the shard
assertEquals(0, docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.PULL)).size());
}
@Test
@@ -257,6 +253,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
return false;
}
LOG.info("Running delete core {}",cd);
try {
ZkNodeProps m = new ZkNodeProps(
Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),

View File

@@ -317,13 +317,9 @@ public class ForceLeaderTest extends HttpPartitionTest {
private void assertSendDocFails(int docId) throws Exception {
// sending a doc in this state fails
try {
sendDoc(docId);
log.error("Should've failed indexing during a down state. Cluster state: " + printClusterStateInfo());
fail("Should've failed indexing during a down state.");
} catch (SolrException ex) {
log.info("Document couldn't be sent, which is expected.");
}
expectThrows(SolrException.class,
"Should've failed indexing during a down state.",
() -> sendDoc(docId));
}
private void putNonLeadersIntoLIR(String collectionName, String shard, ZkController zkController, Replica leader, List<Replica> notLeaders) throws Exception {

View File

@@ -16,6 +16,7 @@
*/
package org.apache.solr.cloud;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.util.NamedList;
@@ -58,14 +59,14 @@ public class OverseerStatusTest extends SolrCloudTestCase {
SimpleOrderedMap<Object> reload = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.RELOAD.toLower());
assertEquals("No stats for reload in OverseerCollectionProcessor", 1, reload.get("requests"));
try {
CollectionAdminRequest.splitShard("non_existent_collection")
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
"Split shard for non existent collection should have failed",
() -> CollectionAdminRequest
.splitShard("non_existent_collection")
.setShardName("non_existent_shard")
.process(cluster.getSolrClient());
fail("Split shard for non existent collection should have failed");
} catch (Exception e) {
// expected because we did not correctly specify required params for split
}
.process(cluster.getSolrClient())
);
resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
collection_operations = (NamedList<Object>) resp.get("collection_operations");
SimpleOrderedMap<Object> split = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.SPLITSHARD.toLower());

View File

@@ -140,12 +140,12 @@ public class SolrXmlInZkTest extends SolrTestCaseJ4 {
@Test
public void testNotInZkOrOnDisk() throws Exception {
try {
SolrException e = expectThrows(SolrException.class, () -> {
System.setProperty("hostPort", "8787");
setUpZkAndDiskXml(false, false); // solr.xml not on disk either
fail("Should have thrown an exception here");
} catch (SolrException solre) {
});
assertTrue("Should be failing to create default solr.xml in code",
solre.getMessage().contains("solr.xml does not exist"));
e.getMessage().contains("solr.xml does not exist"));
} finally {
closeZK();
}

View File

@@ -29,6 +29,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -79,12 +80,9 @@ public class TestAuthenticationFramework extends SolrCloudTestCase {
// Should fail with 401
try {
collectionCreateSearchDeleteTwice();
fail("Should've returned a 401 error");
} catch (Exception ex) {
if (!ex.getMessage().contains("Error 401")) {
fail("Should've returned a 401 error");
}
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
this::collectionCreateSearchDeleteTwice);
assertTrue("Should've returned a 401 error", e.getMessage().contains("Error 401"));
} finally {
MockAuthenticationPlugin.expectedUsername = null;
MockAuthenticationPlugin.expectedPassword = null;

View File

@@ -30,7 +30,6 @@ import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
@@ -199,13 +198,11 @@ public class TestCloudDeleteByQuery extends SolrCloudTestCase {
public void testMalformedDBQ(SolrClient client) throws Exception {
assertNotNull("client not initialized", client);
try {
UpdateResponse rsp = update(params()).deleteByQuery("foo_i:not_a_num").process(client);
fail("Expected DBQ failure: " + rsp.toString());
} catch (SolrException e) {
SolrException e = expectThrows(SolrException.class,
"Expected DBQ failure",
() -> update(params()).deleteByQuery("foo_i:not_a_num").process(client));
assertEquals("not the expected DBQ failure: " + e.getMessage(), 400, e.code());
}
}
//
public void testMalformedDBQViaCloudClient() throws Exception {

View File

@@ -100,20 +100,15 @@ public class TestCloudInspectUtil extends SolrTestCaseJ4 {
// ################################
addFails = new HashSet<String>();
deleteFails = new HashSet<String>();
final HashSet<String> addFailsExpectEx = new HashSet<String>();
final HashSet<String> deleteFailsExpectEx = new HashSet<String>();
a = getDocList("2", "3", "4");
b = getDocList("2", "3", "4");
try {
legal = CloudInspectUtil.checkIfDiffIsLegal(a, b, "control", "cloud",
addFails, deleteFails);
fail("Expected exception because lists have no diff");
} catch (IllegalArgumentException e) {
// expected
}
final SolrDocumentList aExpectEx = getDocList("2", "3", "4");
final SolrDocumentList bExpectEx = getDocList("2", "3", "4");
expectThrows(IllegalArgumentException.class, "Expected exception because lists have no diff",
() -> CloudInspectUtil.checkIfDiffIsLegal(aExpectEx, bExpectEx,
"control", "cloud", addFailsExpectEx, deleteFailsExpectEx));
}
private SolrDocumentList getDocList(String ... ids) {

View File

@@ -92,7 +92,6 @@ import org.apache.solr.util.ExternalPaths;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.noggit.JSONParser;
@@ -632,14 +631,10 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
private void verifyException(SolrClient solrClient, ConfigSetAdminRequest request,
String errorContains) throws Exception {
try {
solrClient.request(request);
Assert.fail("Expected exception");
} catch (Exception e) {
Exception e = expectThrows(Exception.class, () -> solrClient.request(request));
assertTrue("Expected exception message to contain: " + errorContains
+ " got: " + e.getMessage(), e.getMessage().contains(errorContains));
}
}
@Test
public void testDelete() throws Exception {

View File

@@ -38,7 +38,6 @@ import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
import org.apache.solr.client.solrj.request.ConfigSetAdminRequest.Create;
import org.apache.solr.client.solrj.response.ConfigSetAdminResponse;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkConfigManager;
@@ -58,7 +57,6 @@ import org.apache.zookeeper.server.ZKDatabase;
import org.apache.zookeeper.server.quorum.Leader.Proposal;
import org.apache.zookeeper.txn.TxnHeader;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -112,14 +110,10 @@ public class TestConfigSetsAPIZkFailure extends SolrTestCaseJ4 {
Create create = new Create();
create.setBaseConfigSetName(BASE_CONFIGSET_NAME).setConfigSetName(CONFIGSET_NAME);
try {
ConfigSetAdminResponse response = create.process(solrClient);
Assert.fail("Expected solr exception");
} catch (RemoteSolrException se) {
RemoteSolrException se = expectThrows(RemoteSolrException.class, () -> create.process(solrClient));
// partial creation should have been cleaned up
assertFalse(configManager.configExists(CONFIGSET_NAME));
assertEquals(SolrException.ErrorCode.SERVER_ERROR.code, se.code());
}
} finally {
zkClient.close();
}

View File

@@ -69,14 +69,13 @@ public class TestDownShardTolerantSearch extends SolrCloudTestCase {
assertThat(response.getStatus(), is(0));
assertTrue(response.getResults().getNumFound() > 0);
try {
cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, false));
fail("Request should have failed because we killed shard1 jetty");
} catch (SolrServerException e) {
log.info("error from server", e);
SolrServerException e = expectThrows(SolrServerException.class,
"Request should have failed because we killed shard1 jetty",
() -> cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1)
.setParam(ShardParams.SHARDS_TOLERANT, false))
);
assertNotNull(e.getCause());
assertTrue("Error message from server should have the name of the down shard",
e.getCause().getMessage().contains("shard"));
}
}
}

View File

@@ -51,8 +51,8 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
final String leaderCoreNodeName = shardToLeaderJetty.get(SHARD1).coreNodeName;
final CloudJettyRunner leaderRunner = shardToLeaderJetty.get(SHARD1);
CoreContainer coreContainer = leaderRunner.jetty.getCoreContainer();
ZkController zkController = coreContainer.getZkController();
final CoreContainer coreContainer1 = leaderRunner.jetty.getCoreContainer();
final ZkController zkController1 = coreContainer1.getZkController();
CloudJettyRunner notLeader = null;
for (CloudJettyRunner cloudJettyRunner : shardToJetty.get(SHARD1)) {
@@ -83,22 +83,21 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
/*
1. Test that publishDownState throws exception when zkController.isReplicaInRecoveryHandling == false
*/
try {
LeaderInitiatedRecoveryThread thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
SolrException e = expectThrows(SolrException.class,
"publishDownState should not have succeeded because replica url is not marked in leader initiated recovery in ZkController",
() -> {
LeaderInitiatedRecoveryThread thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd);
assertFalse(zkController.isReplicaInRecoveryHandling(replicaCoreNodeProps.getCoreUrl()));
assertFalse(zkController1.isReplicaInRecoveryHandling(replicaCoreNodeProps.getCoreUrl()));
thread.run();
fail("publishDownState should not have succeeded because replica url is not marked in leader initiated recovery in ZkController");
} catch (SolrException e) {
assertTrue(e.code() == SolrException.ErrorCode.INVALID_STATE.code);
}
});
assertEquals(e.code(), SolrException.ErrorCode.INVALID_STATE.code);
/*
2. Test that a non-live replica cannot be put into LIR or down state
*/
LeaderInitiatedRecoveryThread thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
LeaderInitiatedRecoveryThread thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd);
// kill the replica
int children = cloudClient.getZkStateReader().getZkClient().getChildren("/live_nodes", null, true).size();
@@ -129,7 +128,7 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
ChaosMonkey.start(notLeader.jetty);
waitForRecoveriesToFinish(true);
thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd) {
@Override
protected void updateLIRState(String replicaCoreNodeName) {
@@ -138,13 +137,13 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
};
assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), true));
assertNull(zkController.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertNull(zkController1.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
/*
4. Test that if ZK connection loss or session expired then thread should not attempt to publish down state even if forcePublish=true
*/
thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd) {
@Override
protected void updateLIRState(String replicaCoreNodeName) {
@@ -153,13 +152,13 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
};
assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false));
assertFalse(thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), true));
assertNull(zkController.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertNull(zkController1.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
/*
5. Test that any exception other then ZK connection loss or session expired should publish down state only if forcePublish=true
*/
thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
thread = new LeaderInitiatedRecoveryThread(zkController1, coreContainer1,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, cd) {
@Override
protected void updateLIRState(String replicaCoreNodeName) {
@@ -187,26 +186,21 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
Thread.sleep(500);
}
assertNull(zkController.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertNull(zkController1.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertEquals(Replica.State.DOWN, cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getReplica(replica.getName()).getState());
/*
6. Test that non-leader cannot set LIR nodes
*/
coreContainer = notLeader.jetty.getCoreContainer();
zkController = coreContainer.getZkController();
final CoreContainer coreContainer2 = notLeader.jetty.getCoreContainer();
final ZkController zkController2 = coreContainer2.getZkController();
thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, coreContainer.getCores().iterator().next().getCoreDescriptor()) {
thread = new LeaderInitiatedRecoveryThread(zkController2, coreContainer2,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, coreContainer2.getCores().iterator().next().getCoreDescriptor()) {
@Override
protected void updateLIRState(String replicaCoreNodeName) {
try {
super.updateLIRState(replicaCoreNodeName);
} catch (Exception e) {
assertTrue(e instanceof ZkController.NotLeaderException);
throw e;
}
throw expectThrows(ZkController.NotLeaderException.class, () -> super.updateLIRState(replicaCoreNodeName));
}
};
cversion = getOverseerCversion();
@@ -217,21 +211,21 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest
7. assert that we can write a LIR state if everything else is fine
*/
// reset the zkcontroller to the one from the leader
coreContainer = leaderRunner.jetty.getCoreContainer();
zkController = coreContainer.getZkController();
thread = new LeaderInitiatedRecoveryThread(zkController, coreContainer,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, coreContainer.getCores().iterator().next().getCoreDescriptor());
final CoreContainer coreContainer3 = leaderRunner.jetty.getCoreContainer();
final ZkController zkController3 = coreContainer3.getZkController();
thread = new LeaderInitiatedRecoveryThread(zkController3, coreContainer3,
DEFAULT_COLLECTION, SHARD1, replicaCoreNodeProps, 1, coreContainer3.getCores().iterator().next().getCoreDescriptor());
thread.publishDownState(replicaCoreNodeProps.getCoreName(), replica.getName(), replica.getNodeName(), replicaCoreNodeProps.getCoreUrl(), false);
timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
while (!timeOut.hasTimedOut()) {
Replica.State state = zkController.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName());
Replica.State state = zkController3.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName());
if (state == Replica.State.DOWN) {
break;
}
Thread.sleep(500);
}
assertNotNull(zkController.getLeaderInitiatedRecoveryStateObject(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertEquals(Replica.State.DOWN, zkController.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertNotNull(zkController3.getLeaderInitiatedRecoveryStateObject(DEFAULT_COLLECTION, SHARD1, replica.getName()));
assertEquals(Replica.State.DOWN, zkController3.getLeaderInitiatedRecoveryState(DEFAULT_COLLECTION, SHARD1, replica.getName()));
/*
7. Test that

View File

@@ -152,12 +152,13 @@ public void testCantConnectToPullReplica() throws Exception {
assertNumDocs(10 + i, leaderClient);
}
}
SolrServerException e = expectThrows(SolrServerException.class, () -> {
try(HttpSolrClient pullReplicaClient = getHttpSolrClient(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
pullReplicaClient.query(new SolrQuery("*:*")).getResults().getNumFound();
fail("Shouldn't be able to query the pull replica");
} catch (SolrServerException e) {
//expected
}
});
assertNumberOfReplicas(numShards, 0, numShards, true, true);// Replica should still be active, since it doesn't disconnect from ZooKeeper
{
long numFound = 0;

View File

@@ -16,7 +16,6 @@
*/
package org.apache.solr.cloud;
import junit.framework.Assert;
import org.apache.hadoop.util.Time;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.SolrTestCaseJ4;
@@ -383,33 +382,31 @@ public class TestSolrCloudWithDelegationTokens extends SolrTestCaseJ4 {
SolrRequest request = getAdminRequest(new ModifiableSolrParams());
// test without token
HttpSolrClient ss =
final HttpSolrClient ssWoToken =
new HttpSolrClient.Builder(solrClientPrimary.getBaseURL().toString())
.withResponseParser(solrClientPrimary.getParser())
.build();
try {
doSolrRequest(ss, request, ErrorCode.UNAUTHORIZED.code);
doSolrRequest(ssWoToken, request, ErrorCode.UNAUTHORIZED.code);
} finally {
ss.close();
ssWoToken.close();
}
ss = new HttpSolrClient.Builder(solrClientPrimary.getBaseURL().toString())
final HttpSolrClient ssWToken = new HttpSolrClient.Builder(solrClientPrimary.getBaseURL().toString())
.withKerberosDelegationToken(token)
.withResponseParser(solrClientPrimary.getParser())
.build();
try {
// test with token via property
doSolrRequest(ss, request, HttpStatus.SC_OK);
doSolrRequest(ssWToken, request, HttpStatus.SC_OK);
// test with param -- should throw an exception
ModifiableSolrParams tokenParam = new ModifiableSolrParams();
tokenParam.set("delegation", "invalidToken");
try {
doSolrRequest(ss, getAdminRequest(tokenParam), ErrorCode.FORBIDDEN.code);
Assert.fail("Expected exception");
} catch (IllegalArgumentException ex) {}
expectThrows(IllegalArgumentException.class,
() -> doSolrRequest(ssWToken, getAdminRequest(tokenParam), ErrorCode.FORBIDDEN.code));
} finally {
ss.close();
ssWToken.close();
}
}
}

View File

@@ -223,38 +223,29 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
@Test
public void testProxyNoConfigGroups() throws Exception {
try {
solrClient.request(getProxyRequest("noGroups","bar"));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
assertTrue(ex.getMessage().contains(getExpectedGroupExMsg("noGroups", "bar")));
}
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> solrClient.request(getProxyRequest("noGroups","bar"))
);
assertTrue(e.getMessage().contains(getExpectedGroupExMsg("noGroups", "bar")));
}
@Test
public void testProxyWrongHost() throws Exception {
try {
solrClient.request(getProxyRequest("wrongHost","bar"));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
assertTrue(ex.getMessage().contains(getExpectedHostExMsg("wrongHost")));
}
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> solrClient.request(getProxyRequest("wrongHost","bar"))
);
assertTrue(e.getMessage().contains(getExpectedHostExMsg("wrongHost")));
}
@Test
public void testProxyNoConfigHosts() throws Exception {
try {
solrClient.request(getProxyRequest("noHosts","bar"));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> solrClient.request(getProxyRequest("noHosts","bar"))
);
// FixMe: this should return an exception about the host being invalid,
// but a bug (HADOOP-11077) causes an NPE instead.
// assertTrue(ex.getMessage().contains(getExpectedHostExMsg("noHosts")));
}
}
@Test
public void testProxyValidateAnyHostAnyUser() throws Exception {
@@ -264,14 +255,11 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
@Test
public void testProxyInvalidProxyUser() throws Exception {
try {
// wrong direction, should fail
solrClient.request(getProxyRequest("bar","anyHostAnyUser"));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
assertTrue(ex.getMessage().contains(getExpectedGroupExMsg("bar", "anyHostAnyUser")));
}
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> solrClient.request(getProxyRequest("bar","anyHostAnyUser"))
);
assertTrue(e.getMessage().contains(getExpectedGroupExMsg("bar", "anyHostAnyUser")));
}
@Test
@@ -290,49 +278,38 @@ public class TestSolrCloudWithSecureImpersonation extends SolrTestCaseJ4 {
@Test
public void testProxyUnknownRemote() throws Exception {
try {
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> {
// Use a reserved ip address
String nonProxyUserConfiguredIpAddress = "255.255.255.255";
solrClient.request(getProxyRequest("localHostAnyGroup", "bar", "unknownhost.bar.foo", nonProxyUserConfiguredIpAddress));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
assertTrue(ex.getMessage().contains(getExpectedHostExMsg("localHostAnyGroup")));
}
});
assertTrue(e.getMessage().contains(getExpectedHostExMsg("localHostAnyGroup")));
}
@Test
public void testProxyInvalidRemote() throws Exception {
try {
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> {
String invalidIpAddress = "-127.-128";
solrClient.request(getProxyRequest("localHostAnyGroup","bar", "[ff01::114]", invalidIpAddress));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
assertTrue(ex.getMessage().contains(getExpectedHostExMsg("localHostAnyGroup")));
}
});
assertTrue(e.getMessage().contains(getExpectedHostExMsg("localHostAnyGroup")));
}
@Test
public void testProxyInvalidGroup() throws Exception {
try {
solrClient.request(getProxyRequest("bogusGroup","bar", null));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
assertTrue(ex.getMessage().contains(getExpectedGroupExMsg("bogusGroup", "bar")));
}
HttpSolrClient.RemoteSolrException e = expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> solrClient.request(getProxyRequest("bogusGroup","bar", null))
);
assertTrue(e.getMessage().contains(getExpectedGroupExMsg("bogusGroup", "bar")));
}
@Test
public void testProxyNullProxyUser() throws Exception {
try {
solrClient.request(getProxyRequest("","bar"));
fail("Expected RemoteSolrException");
}
catch (HttpSolrClient.RemoteSolrException ex) {
// this exception is specific to our implementation, don't check a specific message.
}
expectThrows(HttpSolrClient.RemoteSolrException.class,
() -> solrClient.request(getProxyRequest("","bar"))
);
}
@Test

View File

@@ -239,41 +239,35 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
assertQueryDocIds(c, false, "id_not_exists");
// verify adding 2 broken docs causes a clint exception
try {
UpdateResponse rsp = update(params(),
SolrException e = expectThrows(SolrException.class,
"did not get a top level exception when more then 10 docs failed", () ->
update(params(),
doc(f("id", S_ONE_PRE + "X"), f("foo_i", "bogus_val_X")),
doc(f("id", S_TWO_PRE + "Y"), f("foo_i", "bogus_val_Y"))
).process(c);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
).process(c)
);
assertEquals("not the type of error we were expecting ("+e.code()+"): " + e.toString(),
400, e.code());
}
// verify malformed deleteByQuerys fail
try {
UpdateResponse rsp = update(params()).deleteByQuery("foo_i:not_a_num").process(c);
fail("sanity check for malformed DBQ didn't fail: " + rsp.toString());
} catch (SolrException e) {
e = expectThrows(SolrException.class,
"sanity check for malformed DBQ didn't fail",
() -> update(params()).deleteByQuery("foo_i:not_a_num").process(c));
assertEquals("not the expected DBQ failure: " + e.getMessage(), 400, e.code());
}
// verify opportunistic concurrency deletions fail as we expect when docs are / aren't present
for (UpdateRequest r : new UpdateRequest[] {
update(params("commit", "true")).deleteById(S_ONE_PRE + "1", -1L),
update(params("commit", "true")).deleteById(S_TWO_PRE + "2", -1L),
update(params("commit", "true")).deleteById("id_not_exists", 1L) }) {
try {
UpdateResponse rsp = r.process(c);
fail("sanity check for opportunistic concurrency delete didn't fail: "
+ r.toString() + " => " + rsp.toString());
} catch (SolrException e) {
e = expectThrows(SolrException.class, "sanity check for opportunistic concurrency delete didn't fail",
() -> r.process(c)
);
assertEquals("not the expected opportunistic concurrency failure code: "
+ r.toString() + " => " + e.getMessage(), 409, e.code());
}
}
}
}
//
public void testVariousDeletesViaCloudClient() throws Exception {
@@ -538,10 +532,9 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, more then 10 (total) should fail
try {
rsp = update(params("update.chain", "tolerant-chain-max-errors-10",
"commit", "true"),
SolrException e = expectThrows(SolrException.class,
"did not get a top level exception when more then 10 docs failed",
() -> update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"),
doc(f("id", S_ONE_PRE + "11")),
doc(f("id", S_TWO_PRE + "21"), f("foo_i", "bogus_val")),
doc(f("id", S_ONE_PRE + "12")),
@@ -562,10 +555,9 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
doc(f("id", S_TWO_PRE + "29"), f("foo_i", "bogus_val")),
doc(f("id", S_ONE_PRE + "10")), // may be skipped, more then 10 fails
doc(f("id", S_TWO_PRE + "20")) // may be skipped, more then 10 fails
).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
).process(client)
);
{
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(),
@@ -601,6 +593,7 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
err.getMessage().contains("bogus_val"));
}
}
assertEquals(0, client.commit().getStatus()); // need to force since update didn't finish
assertQueryDocIds(client, false
// explicitly failed
@@ -621,7 +614,8 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
// many docs from diff shards, more then 10 from a single shard (two) should fail
try {
e = expectThrows(SolrException.class, "did not get a top level exception when more then 10 docs failed",
() -> {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
@@ -634,12 +628,12 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
docs.add(doc(f("id", S_ONE_PRE + "x"))); // may be skipped, more then 10 fails
docs.add(doc(f("id", S_TWO_PRE + "x"))); // may be skipped, more then 10 fails
rsp = update(params("update.chain", "tolerant-chain-max-errors-10",
update(params("update.chain", "tolerant-chain-max-errors-10",
"commit", "true"),
docs.toArray(new SolrInputDocument[docs.size()])).process(client);
});
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
{
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting ("+e.code()+"): " + e.toString(),
@@ -676,8 +670,8 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
assertTrue("failed err msg didn't match expected value: " + err,
err.getMessage().contains("bogus_val"));
}
}
assertEquals(0, client.commit().getStatus()); // need to force since update didn't finish
assertQueryDocIds(client, true
, S_ONE_PRE + "z", S_ONE_PRE + "y", S_TWO_PRE + "z", S_TWO_PRE + "y" // first
@@ -700,7 +694,9 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
// many docs from diff shards, more then 10 don't have any uniqueKey specified
try {
e = expectThrows(SolrException.class,
"did not get a top level exception when more then 10 docs mising uniqueKey",
() -> {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
@@ -713,12 +709,12 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
docs.add(doc(f("id", S_ONE_PRE + "x"))); // may be skipped, more then 10 fails
docs.add(doc(f("id", S_TWO_PRE + "x"))); // may be skipped, more then 10 fails
rsp = update(params("update.chain", "tolerant-chain-max-errors-10",
update(params("update.chain", "tolerant-chain-max-errors-10",
"commit", "true"),
docs.toArray(new SolrInputDocument[docs.size()])).process(client);
});
fail("did not get a top level exception when more then 10 docs mising uniqueKey: " + rsp.toString());
} catch (SolrException e) {
{
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting ("+e.code()+"): " + e.toString(),
@@ -747,6 +743,7 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(),
11, actualKnownErrsCount);
}
assertEquals(0, client.commit().getStatus()); // need to force since update didn't finish
assertQueryDocIds(client, true
, S_ONE_PRE + "z", S_ONE_PRE + "y", S_TWO_PRE + "z", S_TWO_PRE + "y" // first
@@ -857,17 +854,20 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
}
// attempt a request containing 4 errors of various types (add, delI, delQ) .. 1 too many
try {
rsp = update(params("update.chain", "tolerant-chain-max-errors-10",
SolrException e = expectThrows(SolrException.class,
"did not get a top level exception when more then 4 updates failed",
() -> update(params("update.chain", "tolerant-chain-max-errors-10",
"maxErrors", "3",
"commit", "true"),
doc(f("id", docId22), f("foo_i", "bogus_val")))
.deleteById(docId1, -1L)
.deleteByQuery("malformed:[")
.deleteById(docId21, -1L)
.process(client);
fail("did not get a top level exception when more then 4 updates failed: " + rsp.toString());
} catch (SolrException e) {
.process(client)
);
{
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830

View File

@@ -100,22 +100,19 @@ public class TestZkChroot extends SolrTestCaseJ4 {
System.setProperty("bootstrap_conf", "false");
System.setProperty("zkHost", zkServer.getZkHost() + chroot);
SolrZkClient zkClient = null;
try {
zkClient = new SolrZkClient(zkServer.getZkHost(),
AbstractZkTestCase.TIMEOUT);
try(SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(),
AbstractZkTestCase.TIMEOUT)) {
expectThrows(ZooKeeperException.class,
"did not get a top level exception when more then 4 updates failed",
() -> {
assertFalse("Path '" + chroot + "' should not exist before the test",
zkClient.exists(chroot, true));
cores = CoreContainer.createAndLoad(home);
fail("There should be a zk exception, as the initial path doesn't exist");
} catch (ZooKeeperException e) {
// expected
});
assertFalse("Path shouldn't have been created",
zkClient.exists(chroot, true));// check the path was not created
} finally {
if (cores != null) cores.shutdown();
if (zkClient != null) zkClient.close();
}
}

View File

@@ -178,14 +178,9 @@ public class ZkCLITest extends SolrTestCaseJ4 {
// test put file
String[] args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
"putfile", "/solr.xml", SOLR_HOME + File.separator + "not-there.xml"};
try {
ZkCLI.main(args);
fail("Should have had a file not found exception");
} catch (FileNotFoundException fne) {
String msg = fne.getMessage();
assertTrue("Didn't find expected error message containing 'not-there.xml' in " + msg,
msg.indexOf("not-there.xml") != -1);
}
FileNotFoundException e = expectThrows(FileNotFoundException.class, () -> ZkCLI.main(args));
assertTrue("Didn't find expected error message containing 'not-there.xml' in " + e.getMessage(),
e.getMessage().indexOf("not-there.xml") != -1);
}
@Test
@@ -332,11 +327,8 @@ public class ZkCLITest extends SolrTestCaseJ4 {
File file = createTempFile("newfile", null).toFile();
String[] args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
"getfile", getNode, file.getAbsolutePath()};
try {
ZkCLI.main(args);
fail("Expected NoNodeException");
} catch (KeeperException.NoNodeException ex) {
}
KeeperException e = expectThrows(KeeperException.class, () -> ZkCLI.main(args));
assertEquals(e.code(), KeeperException.Code.NONODE);
}
@Test(expected = SolrException.class)

View File

@@ -21,7 +21,6 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import junit.framework.Assert;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCmdExecutor;
@@ -117,7 +116,8 @@ public class ZkSolrClientTest extends SolrTestCaseJ4 {
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
zkClient = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT);
final SolrZkClient zkClientConLoss = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT);
zkClient = zkClientConLoss;
String shardsPath = "/collections/collection1/shards";
zkClient.makePath(shardsPath, false, true);
@@ -129,12 +129,10 @@ public class ZkSolrClientTest extends SolrTestCaseJ4 {
Thread.sleep(80);
try {
zkClient.makePath("collections/collection2", false);
Assert.fail("Server should be down here");
} catch (KeeperException.ConnectionLossException e) {
}
expectThrows(KeeperException.class,
"Server should be down",
() -> zkClientConLoss.makePath("collections/collection2", false)
);
// bring server back up
server = new ZkTestServer(zkDir, zkServerPort);
@@ -204,18 +202,14 @@ public class ZkSolrClientTest extends SolrTestCaseJ4 {
ZkCmdExecutor zkCmdExecutor = new ZkCmdExecutor(timeout);
final long start = System.nanoTime();
try {
expectThrows(KeeperException.SessionExpiredException.class, () -> {
zkCmdExecutor.retryOperation(() -> {
if (System.nanoTime() - start > TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) {
throw new KeeperException.SessionExpiredException();
}
throw new KeeperException.ConnectionLossException();
});
} catch(KeeperException.SessionExpiredException e) {
} catch (Exception e) {
fail("Expected " + KeeperException.SessionExpiredException.class.getSimpleName() + " but got " + e.getClass().getSimpleName());
}
});
} finally {
if (server != null) {
server.shutdown();
@@ -334,31 +328,22 @@ public class ZkSolrClientTest extends SolrTestCaseJ4 {
zkClient.clean("/");
// should not work
try {
zkClient.makePath("/test/path/here", (byte[]) null, CreateMode.PERSISTENT, (Watcher) null, true, true, 1);
fail("We should not be able to create this path");
} catch (Exception e) {
}
KeeperException e =expectThrows(KeeperException.NoNodeException.class,
"We should not be able to create this path",
() -> zkClient.makePath("/test/path/here", (byte[]) null, CreateMode.PERSISTENT, (Watcher) null, true, true, 1));
zkClient.clean("/");
ZkCmdExecutor zkCmdExecutor = new ZkCmdExecutor(30000);
try {
zkCmdExecutor.ensureExists("/collection/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
fail("We should not be able to create this path");
} catch (Exception e) {
}
expectThrows(KeeperException.NoNodeException.class,
"We should not be able to create this path",
() -> zkCmdExecutor.ensureExists("/collection/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2));
zkClient.makePath("/collection", true);
try {
zkCmdExecutor.ensureExists("/collections/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2);
fail("We should not be able to create this path");
} catch (Exception e) {
}
expectThrows(KeeperException.NoNodeException.class,
"We should not be able to create this path",
() -> zkCmdExecutor.ensureExists("/collections/collection/leader", (byte[]) null, CreateMode.PERSISTENT, zkClient, 2));
zkClient.makePath("/collection/collection", true);
byte[] bytes = new byte[10];