Merge branch 'master' into LUCENE-8813

Simon Willnauer 2019-05-31 21:05:41 +02:00
commit d488156921
61 changed files with 1541 additions and 332 deletions

View File

@@ -67,6 +67,13 @@
</maintainer>
<!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
<release>
<Version>
<name>lucene-8.1.1</name>
<created>2019-05-28</created>
<revision>8.1.1</revision>
</Version>
</release>
<release>
<Version>
<name>lucene-8.1.0</name>

View File

@@ -67,6 +67,13 @@
</maintainer>
<!-- NOTE: please insert releases in numeric order, NOT chronologically. -->
<release>
<Version>
<name>solr-8.1.1</name>
<created>2019-05-28</created>
<revision>8.1.1</revision>
</Version>
</release>
<release>
<Version>
<name>solr-8.1.0</name>

View File

@@ -290,7 +290,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
"8.0.0-cfs",
"8.0.0-nocfs",
"8.1.0-cfs",
"8.1.0-nocfs"
"8.1.0-nocfs",
"8.1.1-cfs",
"8.1.1-nocfs"
};
public static String[] getOldNames() {
@@ -299,7 +301,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
final static String[] oldSortedNames = {
"sorted.8.0.0",
"sorted.8.1.0"
"sorted.8.1.0",
"sorted.8.1.1"
};
public static String[] getOldSortedNames() {

View File

@@ -87,7 +87,7 @@ New Features
* SOLR-11558: It would be nice if the Graph section of the Cloud tab in the Admin UI could give some more
information about the replicas of a collection (Erick Erickson)
* SOLR-13468: autoscaling/suggestions should be able to give suggestions from config sent as a payload (noble)
* SOLR-12304: The MoreLikeThisComponent now supports the mlt.interestingTerms parameter. Previously this option was
@@ -97,6 +97,11 @@ New Features
* SOLR-13493: /autoscaling/suggestions to be able to filter by type (noble)
* SOLR-13494: Add DeepRandomStream implementation (Joel Bernstein)
* SOLR-13504: In autoscaling policies, use an explicit 'nodeset' attribute for filtering
nodes instead of using them directly at the toplevel (noble)
Bug Fixes
----------------------
@@ -106,6 +111,8 @@ Bug Fixes
* SOLR-12941: Fix IndexSizeTrigger to correctly work with "aboveBytes" and "splitMethod=link" parameters. (ab)
* SOLR-13491: SolrZkClient's watch wrapper no longer allows zookeeper to hold the same watch object multiple times.
Other Changes
----------------------

View File

@@ -69,11 +69,17 @@ public class LegacyNoFacetTest extends LegacyAbstractAnalyticsTest {
defaults.put("string_sd", "str0");
intTestStart = new ArrayList<>();
intMissing = 0;
longTestStart = new ArrayList<>();
longMissing = 0;
floatTestStart = new ArrayList<>();
floatMissing = 0;
doubleTestStart = new ArrayList<>();
doubleMissing = 0;
dateTestStart = new ArrayList<>();
dateMissing = 0;
stringTestStart = new ArrayList<>();
stringMissing = 0;
for (int j = 0; j < NUM_LOOPS; ++j) {
int i = j%INT;

View File

@@ -22,6 +22,7 @@ import java.util.List;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.util.NamedList;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -34,11 +35,19 @@ public class LegacyQueryFacetCloudTest extends LegacyAbstractAnalyticsFacetCloud
private static final int STRING = 7;
private static final int NUM_LOOPS = 100;
private static ArrayList<ArrayList<Integer>> int1TestStart = new ArrayList<>();
private static ArrayList<ArrayList<Integer>> int2TestStart = new ArrayList<>();
private static ArrayList<ArrayList<Long>> longTestStart = new ArrayList<>();
private static ArrayList<ArrayList<Float>> floatTestStart = new ArrayList<>();
private static final ArrayList<ArrayList<Integer>> int1TestStart = new ArrayList<>();
private static final ArrayList<ArrayList<Integer>> int2TestStart = new ArrayList<>();
private static final ArrayList<ArrayList<Long>> longTestStart = new ArrayList<>();
private static final ArrayList<ArrayList<Float>> floatTestStart = new ArrayList<>();
@After
public void afterTest() throws Exception {
int1TestStart.clear();
int2TestStart.clear();
longTestStart.clear();
floatTestStart.clear();
}
@Before
public void beforeTest() throws Exception {

View File

@@ -67,9 +67,10 @@ public class SolrExporterIntegrationTest extends SolrExporterTestBase {
Map<String, Double> jvmMetrics = metricsWithName(
getAllMetrics(), "solr_metrics_jvm_threads");
// Include all thread states + plus overall count + number of daemon threads + number of deadlocked threads
assertEquals(NUM_NODES * (Thread.State.values().length + 3),
jvmMetrics.size());
// exact set of metrics can vary based on JVM impl (ie: windows)
// but there should always be at least one per known thread state per node...
assertTrue(jvmMetrics.toString(),
(NUM_NODES * Thread.State.values().length) < jvmMetrics.size());
}
@Test

View File

@@ -48,6 +48,15 @@ public class BufferStore implements Store {
private final AtomicLong shardBuffercacheAllocate;
private final AtomicLong shardBuffercacheLost;
/**
* For testing only
*
* @lucene.internal
*/
static void clearBufferStores() {
bufferStores.clear();
}
public synchronized static void initNewBuffer(int bufferSize, long totalAmount) {
initNewBuffer(bufferSize, totalAmount, null);
}

View File

@@ -56,7 +56,7 @@ public class TestCloudPhrasesIdentificationComponent extends SolrCloudTestCase {
/** A basic client for operations at the cloud level, default collection will be set */
private static CloudSolrClient CLOUD_CLIENT;
/** One client per node */
private static ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
private static final ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
@@ -114,7 +114,7 @@ public class TestCloudPhrasesIdentificationComponent extends SolrCloudTestCase {
for (HttpSolrClient client : CLIENTS) {
client.close();
}
CLIENTS = null;
CLIENTS.clear();
}
public void testBasicPhrases() throws Exception {

View File

@@ -58,7 +58,7 @@ public class TestCloudPseudoReturnFields extends SolrCloudTestCase {
/** A basic client for operations at the cloud level, default collection will be set */
private static CloudSolrClient CLOUD_CLIENT;
/** One client per node */
private static ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
private static final ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
@@ -114,7 +114,7 @@ public class TestCloudPseudoReturnFields extends SolrCloudTestCase {
for (HttpSolrClient client : CLIENTS) {
client.close();
}
CLIENTS = null;
CLIENTS.clear();
}
public void testMultiValued() throws Exception {

View File

@@ -66,7 +66,7 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
/** A basic client for operations at the cloud level, default collection will be set */
private static CloudSolrClient CLOUD_CLIENT;
/** One client per node */
private static List<HttpSolrClient> CLIENTS = Collections.synchronizedList(new ArrayList<>(5));
private static final List<HttpSolrClient> CLIENTS = Collections.synchronizedList(new ArrayList<>(5));
/** Always included in fl so we can vet what doc we're looking at */
private static final FlValidator ID_VALIDATOR = new SimpleFieldValueValidator("id");
@@ -159,7 +159,7 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
for (HttpSolrClient client : CLIENTS) {
client.close();
}
CLIENTS = null;
CLIENTS.clear();
}
/**

View File

@@ -110,7 +110,7 @@ public class HttpTriggerListenerTest extends SolrCloudTestCase {
response = solrClient.request(req);
assertEquals(response.get("result").toString(), "success");
assertEquals(requests.toString(), 0, requests.size());
assertEquals(mockService.requests.toString(), 0, mockService.requests.size());
cluster.startJettySolrRunner();
cluster.waitForAllNodes(30);
@@ -119,25 +119,25 @@ public class HttpTriggerListenerTest extends SolrCloudTestCase {
Thread.sleep(5000);
assertEquals(requests.toString(), 4, requests.size());
requests.forEach(s -> assertTrue(s.contains("Content-Type: application/json")));
requests.forEach(s -> assertTrue(s.contains("X-Foo: foo")));
requests.forEach(s -> assertTrue(s.contains("source=node_added_trigger")));
requests.forEach(s -> assertTrue(s.contains("type=NODEADDED")));
assertEquals(mockService.requests.toString(), 4, mockService.requests.size());
mockService.requests.forEach(s -> assertTrue(s.contains("Content-Type: application/json")));
mockService.requests.forEach(s -> assertTrue(s.contains("X-Foo: foo")));
mockService.requests.forEach(s -> assertTrue(s.contains("source=node_added_trigger")));
mockService.requests.forEach(s -> assertTrue(s.contains("type=NODEADDED")));
String request = requests.get(0);
String request = mockService.requests.get(0);
assertTrue(request, request.startsWith("/foo/test/STARTED"));
assertTrue(request, request.contains("actionName=,")); // empty actionName
request = requests.get(1);
request = mockService.requests.get(1);
assertTrue(request, request.startsWith("/foo/test/BEFORE_ACTION"));
assertTrue(request, request.contains("actionName=test,")); // actionName
request = requests.get(2);
request = mockService.requests.get(2);
assertTrue(request, request.startsWith("/foo/test/AFTER_ACTION"));
assertTrue(request, request.contains("actionName=test,")); // actionName
request = requests.get(3);
request = mockService.requests.get(3);
assertTrue(request, request.startsWith("/foo/test/SUCCEEDED"));
assertTrue(request, request.contains("actionName=,")); // empty actionName
}
@@ -150,12 +150,10 @@ public class HttpTriggerListenerTest extends SolrCloudTestCase {
}
}
static List<String> requests = new ArrayList<>();
private static class MockService extends Thread {
Server server;
public final List<String> requests = new ArrayList<>();
private Server server;
public void start() {
server = new Server(new InetSocketAddress("localhost", 0));
server.setHandler(new AbstractHandler() {

View File

@@ -57,7 +57,6 @@ import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.UpdateParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.Pair;

View File

@@ -22,8 +22,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -61,11 +59,9 @@ public class MetricTriggerIntegrationTest extends SolrCloudTestCase {
private static final TimeSource timeSource = TimeSource.NANO_TIME;
static Map<String, List<CapturedEvent>> listenerEvents = new HashMap<>();
static CountDownLatch listenerCreated = new CountDownLatch(1);
static final Map<String, List<CapturedEvent>> listenerEvents = new HashMap<>();
private static CountDownLatch triggerFiredLatch;
private static int waitForSeconds = 1;
private static Set<TriggerEvent> events = ConcurrentHashMap.newKeySet();
@BeforeClass
public static void setupCluster() throws Exception {
@@ -77,6 +73,7 @@ public class MetricTriggerIntegrationTest extends SolrCloudTestCase {
CloudTestUtils.waitForTriggerToBeScheduled(cluster.getOpenOverseer().getSolrCloudManager(), ".scheduled_maintenance");
CloudTestUtils.suspendTrigger(cluster.getOpenOverseer().getSolrCloudManager(), ".scheduled_maintenance");
listenerEvents.clear();
triggerFiredLatch = new CountDownLatch(1);
}
@@ -211,7 +208,6 @@ public class MetricTriggerIntegrationTest extends SolrCloudTestCase {
@Override
public void process(TriggerEvent event, ActionContext context) throws Exception {
try {
events.add(event);
long currentTimeNanos = context.getCloudManager().getTimeSource().getTimeNs();
long eventTimeNanos = event.getEventTime();
long waitForNanos = TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS) - WAIT_FOR_DELTA_NANOS;
@@ -231,7 +227,6 @@ public class MetricTriggerIntegrationTest extends SolrCloudTestCase {
@Override
public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
super.configure(loader, cloudManager, config);
listenerCreated.countDown();
timeSource = cloudManager.getTimeSource();
}

View File

@@ -52,8 +52,8 @@ public class ScheduledTriggerIntegrationTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static CountDownLatch triggerFiredLatch;
private static Set<TriggerEvent> events = ConcurrentHashMap.newKeySet();
private static AtomicReference<Map<String, Object>> actionContextPropertiesRef = new AtomicReference<>();
private static final Set<TriggerEvent> events = ConcurrentHashMap.newKeySet();
private static final AtomicReference<Map<String, Object>> actionContextPropertiesRef = new AtomicReference<>();
@Before
public void setupCluster() throws Exception {
@@ -71,6 +71,8 @@ public class ScheduledTriggerIntegrationTest extends SolrCloudTestCase {
@After
public void afterTest() throws Exception {
shutdownCluster();
events.clear();
actionContextPropertiesRef.set(null);
}
@Test

View File

@@ -22,8 +22,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -52,18 +50,22 @@ import static org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.WAIT_FOR_
@LogLevel("org.apache.solr.cloud.autoscaling=DEBUG;org.apache.solr.client.solrj.cloud.autoscaling=DEBUG")
public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
static Map<String, List<CapturedEvent>> listenerEvents = new HashMap<>();
static CountDownLatch listenerCreated = new CountDownLatch(1);
static boolean failDummyAction = false;
private static CountDownLatch actionConstructorCalled = new CountDownLatch(1);
private static CountDownLatch actionInitCalled = new CountDownLatch(1);
private static final int waitForSeconds = 1;
private static final Map<String, List<CapturedEvent>> listenerEvents = new HashMap<>();
private static CountDownLatch triggerFiredLatch = new CountDownLatch(1);
private static int waitForSeconds = 1;
private static AtomicBoolean triggerFired = new AtomicBoolean();
private static Set<TriggerEvent> events = ConcurrentHashMap.newKeySet();
private static final AtomicBoolean triggerFired = new AtomicBoolean();
private static final void resetTriggerAndListenerState() {
// reset the trigger and captured events
listenerEvents.clear();
triggerFiredLatch = new CountDownLatch(1);
triggerFired.compareAndSet(true, false);
}
@BeforeClass
public static void setupCluster() throws Exception {
resetTriggerAndListenerState();
configureCluster(2)
.addConfig("conf", configset("cloud-minimal"))
.configure();
@@ -76,8 +78,6 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
@Test
public void testCooldown() throws Exception {
CloudSolrClient solrClient = cluster.getSolrClient();
failDummyAction = false;
waitForSeconds = 1;
String setTriggerCommand = "{" +
"'set-trigger' : {" +
"'name' : 'node_added_cooldown_trigger'," +
@@ -105,9 +105,6 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
response = solrClient.request(req);
assertEquals(response.get("result").toString(), "success");
listenerCreated = new CountDownLatch(1);
listenerEvents.clear();
JettySolrRunner newNode = cluster.startJettySolrRunner();
cluster.waitForAllNodes(30);
boolean await = triggerFiredLatch.await(20, TimeUnit.SECONDS);
@@ -121,10 +118,7 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
assertTrue(capturedEvents.toString(), capturedEvents.size() > 0);
long prevTimestamp = capturedEvents.get(capturedEvents.size() - 1).timestamp;
// reset the trigger and captured events
listenerEvents.clear();
triggerFiredLatch = new CountDownLatch(1);
triggerFired.compareAndSet(true, false);
resetTriggerAndListenerState();
JettySolrRunner newNode2 = cluster.startJettySolrRunner();
await = triggerFiredLatch.await(20, TimeUnit.SECONDS);
@@ -153,10 +147,7 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
req = AutoScalingRequest.create(SolrRequest.METHOD.GET, null);
response = solrClient.request(req);
// reset the trigger and captured events
listenerEvents.clear();
triggerFiredLatch = new CountDownLatch(1);
triggerFired.compareAndSet(true, false);
resetTriggerAndListenerState();
JettySolrRunner newNode3 = cluster.startJettySolrRunner();
await = triggerFiredLatch.await(20, TimeUnit.SECONDS);
@@ -187,14 +178,13 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
public static class TestTriggerAction extends TriggerActionBase {
public TestTriggerAction() {
actionConstructorCalled.countDown();
// No-Op
}
@Override
public void process(TriggerEvent event, ActionContext actionContext) {
try {
if (triggerFired.compareAndSet(false, true)) {
events.add(event);
long currentTimeNanos = actionContext.getCloudManager().getTimeSource().getTimeNs();
long eventTimeNanos = event.getEventTime();
long waitForNanos = TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS) - WAIT_FOR_DELTA_NANOS;
@@ -214,7 +204,6 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
@Override
public void init() throws Exception {
log.info("TestTriggerAction init");
actionInitCalled.countDown();
super.init();
}
}
@@ -224,7 +213,6 @@ public class TriggerCooldownIntegrationTest extends SolrCloudTestCase {
@Override
public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
super.configure(loader, cloudManager, config);
listenerCreated.countDown();
timeSource = cloudManager.getTimeSource();
}

View File

@@ -25,6 +25,7 @@ import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.response.SolrQueryResponse;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -38,16 +39,24 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 {
initCore("solrconfig-minimal.xml", "schema.xml");
h.getCoreContainer().waitForLoadingCoresToFinish(30000);
// manually register some metrics in solr.jvm and solr.jetty - TestHarness doesn't init them
Counter c = h.getCoreContainer().getMetricManager().counter(null, "solr.jvm", "foo");
// manually register & seed some metrics in solr.jvm and solr.jetty for testing via handler
// (use "solrtest_" prefix just in case the jvm or jetty adds a "foo" metric at some point)
Counter c = h.getCoreContainer().getMetricManager().counter(null, "solr.jvm", "solrtest_foo");
c.inc();
c = h.getCoreContainer().getMetricManager().counter(null, "solr.jetty", "foo");
c = h.getCoreContainer().getMetricManager().counter(null, "solr.jetty", "solrtest_foo");
c.inc(2);
// test escapes
c = h.getCoreContainer().getMetricManager().counter(null, "solr.jetty", "foo:bar");
c = h.getCoreContainer().getMetricManager().counter(null, "solr.jetty", "solrtest_foo:bar");
c.inc(3);
}
@AfterClass
public static void cleanupMetrics() throws Exception {
h.getCoreContainer().getMetricManager().registry("solr.jvm" ).remove("solrtest_foo");
h.getCoreContainer().getMetricManager().registry("solr.jetty").remove("solrtest_foo");
h.getCoreContainer().getMetricManager().registry("solr.jetty").remove("solrtest_foo:bar");
}
@Test
public void test() throws Exception {
MetricsHandler handler = new MetricsHandler(h.getCoreContainer());
@@ -257,7 +266,7 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 {
assertNotNull(val);
assertTrue(val instanceof Number);
String key3 = "solr.jetty:foo\\:bar";
String key3 = "solr.jetty:solrtest_foo\\:bar";
resp = new SolrQueryResponse();
handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json",
MetricsHandler.KEY_PARAM, key3), resp);

View File

@@ -51,6 +51,8 @@ public class TestOverriddenPrefixQueryForCustomFieldType extends SolrTestCaseJ4
super.setUp();
clearIndex();
assertU(commit());
otherCounts=0;
counts = new int[2];
}
public void createIndex(int nDocs) {

View File

@@ -101,7 +101,9 @@ public class RangeFacetCloudTest extends SolrCloudTestCase {
final int numDocs = atLeast(1000);
final int maxTermId = atLeast(TERM_VALUES_RANDOMIZER);
// clear the RANGE_MODEL
Arrays.fill(RANGE_MODEL, 0);
// seed the TERM_MODEL Maps so we don't have null check later
for (int i = 0; i < NUM_RANGE_VALUES; i++) {
TERM_MODEL[i] = new LinkedHashMap<>();

View File

@@ -83,7 +83,7 @@ public class TestCloudJSONFacetJoinDomain extends SolrCloudTestCase {
/** A basic client for operations at the cloud level, default collection will be set */
private static CloudSolrClient CLOUD_CLIENT;
/** One client per node */
private static ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
private static final ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
@@ -188,7 +188,7 @@ public class TestCloudJSONFacetJoinDomain extends SolrCloudTestCase {
for (HttpSolrClient client : CLIENTS) {
client.close();
}
CLIENTS = null;
CLIENTS.clear();
}
/** Sanity check that malformed requests produce errors */

View File

@@ -94,7 +94,7 @@ public class TestCloudJSONFacetSKG extends SolrCloudTestCase {
/** A basic client for operations at the cloud level, default collection will be set */
private static CloudSolrClient CLOUD_CLIENT;
/** One client per node */
private static ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
private static final ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
@@ -210,7 +210,7 @@ public class TestCloudJSONFacetSKG extends SolrCloudTestCase {
for (HttpSolrClient client : CLIENTS) {
client.close();
}
CLIENTS = null;
CLIENTS.clear();
}
/**

View File

@@ -23,6 +23,7 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCase;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -46,6 +47,11 @@ public class BufferStoreTest extends SolrTestCase {
store = BufferStore.instance(blockSize);
}
@After
public void clearBufferStores() {
BufferStore.clearBufferStores();
}
@Test
public void testBufferTakePut() {
byte[] b1 = store.takeBuffer(blockSize);

View File

@@ -90,8 +90,8 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
private static final String parent = "parent_s";
private static final String type = "type_s";
private final static AtomicInteger counter = new AtomicInteger();
private static ExecutorService exe;
private static AtomicInteger counter = new AtomicInteger();
private static boolean cachedMode;
private static XMLInputFactory inputFactory;
@@ -117,7 +117,7 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
rarely() ? ExecutorUtil.newMDCAwareFixedThreadPool(atLeast(2), new DefaultSolrThreadFactory("AddBlockUpdateTest")) : ExecutorUtil
.newMDCAwareCachedThreadPool(new DefaultSolrThreadFactory("AddBlockUpdateTest"));
counter.set(0);
initCore("solrconfig.xml", "schema15.xml");
}
@@ -157,7 +157,6 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
exe = null;
inputFactory = null;
counter = null;
}
@Test

View File

@@ -16,53 +16,51 @@
// specific language governing permissions and limitations
// under the License.
== Standard Aliases
Since version 6, SolrCloud has had the ability to query one or more collections via an alternative name. These
SolrCloud has the ability to query one or more collections via an alternative name. These
alternative names for collections are known as aliases, and are useful when you want to:
. Atomically switch to using a newly (re)indexed collection with zero down time (by re-defining the alias)
. Insulate the client programming versus changes in collection names
. Issue a single query against several collections with identical schemas
It's also possible to send update commands to aliases, but only to those that either resolve to a single collection
or those that define the routing between multiple collections (Routed Aliases). In other cases update commands are
There are two types of aliases: standard aliases and routed aliases. Within routed aliases, there are two types: category-routed aliases and time-routed aliases. These types are discussed in this section.
It's possible to send collection update commands to aliases, but only to those that either resolve to a single collection
or those that define the routing between multiple collections (<<Routed Aliases>>). In other cases update commands are
rejected with an error since there is no logic by which to distribute documents among the multiple collections.
== Standard Aliases
Standard aliases are created and updated using the <<collections-api.adoc#createalias,CREATEALIAS>> command.
The current list of collections that are members of an alias can be verified via the
<<collections-api.adoc#clusterstatus,CLUSTERSTATUS>> command.
The full definition of all aliases including metadata about that alias (in the case of routed aliases, see below)
can be verified via the <<collections-api.adoc#listaliases,LISTALIASES>> command.
Alternatively this information is available by checking `/aliases.json` in zookeeper via a zookeeper
Alternatively this information is available by checking `/aliases.json` in ZooKeeper with either the native ZooKeeper
client or in the <<cloud-screens.adoc#tree-view,tree page>> of the cloud menu in the admin UI.
Aliases may be deleted via the <<collections-api.adoc#deletealias,DELETEALIAS>> command.
The underlying collections are *unaffected* by this command.
When deleting an alias, underlying collections are *unaffected*.
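For illustration, a minimal SolrJ sketch of this lifecycle might look like the following (the alias and collection names and the ZooKeeper address are hypothetical, not part of any example shipped with Solr):

[source,java]
----
import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

try (CloudSolrClient client = new CloudSolrClient.Builder(
    Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
  // CREATEALIAS: point "reports" at an existing collection
  CollectionAdminRequest.createAlias("reports", "reports_2019").process(client);
  // ... queries and updates may now use "reports" as the collection name ...
  // DELETEALIAS: removes only the alias; reports_2019 itself is unaffected
  CollectionAdminRequest.deleteAlias("reports").process(client);
}
----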
TIP: Any alias (standard or routed) that references multiple collections may complicate relevancy.
By default, SolrCloud scores documents on a per shard basis.
By default, SolrCloud scores documents on a per-shard basis.
+
With multiple collections in an alias this is always a problem, so if you have a use case for which BM25 or
TF/IDF relevancy is important you will want to turn on one of the
<<distributed-requests.adoc#distributedidf,ExactStatsCache>> implementations.
However, for analytical use cases where results are sorted on numeric, date or alphanumeric field values rather
than relevancy calculations this is not a problem.
== Collection admin commands and aliases
Starting with version 8.1 SolrCloud supports using alias names in collection admin commands where normally a
collection name is expected. This works only when the following criteria are satisfied:
* an alias must not refer to more than one collection
* an alias must not refer to a Routed Alias (see below)
If all criteria are satisfied then the command will resolve alias names and operate on the collections the aliases
refer to as if it was invoked with the collection names instead. Otherwise the command will not be executed and
an exception will be thrown.
+
However, for analytical use cases where results are sorted on numeric, date, or alphanumeric field values, rather
than relevancy calculations, this is not a problem.
== Routed Aliases
To address the update limitations associated with standard aliases and provide additional useful features, the concept of
RoutedAliases has been developed.
There are presently two types of Routed Alias time routed and category routed. These are described in detail below,
routed aliases has been developed.
There are presently two types of routed alias: time routed and category routed. These are described in detail below,
but share some common behavior.
When processing an update for a routed alias, Solr initializes its
@@ -75,20 +73,20 @@ RAUP, in coordination with the Overseer, is the main part of a routed alias, and
Ideally, as a user of a routed alias, you needn't concern yourself with the particulars of the collection naming pattern
since both queries and updates may be done via the alias.
When adding data, you should usually direct documents to the alias (e.g., reference the alias name instead of any collection).
The Solr server and CloudSolrClient will direct an update request to the first collection that an alias points to.
The Solr server and `CloudSolrClient` will direct an update request to the first collection that an alias points to.
Once the server receives the data it will perform the necessary routing.
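A hedged sketch of what that looks like from SolrJ (the alias and field names below are placeholders):

[source,java]
----
import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrInputDocument;

try (CloudSolrClient client = new CloudSolrClient.Builder(
    Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "evt-1");
  doc.addField("timestamp_dt", "2019-05-28T12:00:00Z"); // the routed field
  client.add("myAlias", doc); // address the alias, not an underlying collection
  client.commit("myAlias");
}
----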
WARNING: It is possible to update the collections
directly, but there is no safeguard against putting data in the incorrect collection if the alias is circumvented
in this manner.
CAUTION: It's probably a bad idea to use "data driven" mode with routed aliases, as duplicate schema mutations might happen
CAUTION: It is a bad idea to use "data driven" mode (aka <<schemaless-mode.adoc#schemaless-mode,schemaless-mode>>) with routed aliases, as duplicate schema mutations might happen
concurrently leading to errors.
== Time Routed Aliases
=== Time Routed Aliases
Starting in Solr 7.4, Time Routed Aliases (TRAs) are a SolrCloud feature that manages an alias and a time sequential
Time Routed Aliases (TRAs) are a SolrCloud feature that manages an alias and a time sequential
series of collections.
It automatically creates new collections and (optionally) deletes old ones as it routes documents to the correct
@@ -99,10 +97,10 @@ This approach allows for indefinite indexing of data without degradation of perf
If you need to store a lot of timestamped data in Solr, such as logs or IoT sensor data, then this feature probably
makes more sense than creating one sharded hash-routed collection.
=== How It Works
==== How It Works
First you create a time routed aliases using the <<collections-api.adoc#createalias,CREATEALIAS>> command with some
router settings.
First you create a time routed alias using the <<collections-api.adoc#createalias,CREATEALIAS>> command with the
desired router settings.
Most of the settings are editable at a later time using the <<collections-api.adoc#aliasprop,ALIASPROP>> command.
The first collection will be created automatically, along with an alias pointing to it.
@@ -111,16 +109,15 @@ The name of each collection is comprised of the TRA name and the start timestamp
truncated.
The collections list for a TRA is always reverse sorted, and thus the connection path of the request will route to the
lead collection. Using CloudSolrClient is preferable as it can reduce the number of underlying physical HTTP requests by one.
lead collection. Using `CloudSolrClient` is preferable as it can reduce the number of underlying physical HTTP requests by one.
If you know that a particular set of documents to be delivered is going to a particular older collection then you could
direct it there from the client side as an optimization but it's not necessary. CloudSolrClient does not (yet) do this.
direct it there from the client side as an optimization but it's not necessary. `CloudSolrClient` does not (yet) do this.
TRUP first reads TRA configuration from the alias properties when it is initialized. As it sees each document, it checks for
changes to TRA properties, updates its cached configuration if needed and then determines which collection the
RAUP first reads TRA configuration from the alias properties when it is initialized. As it sees each document, it checks for
changes to TRA properties, updates its cached configuration if needed, and then determines which collection the
document belongs to:
* If TRUP needs to send it to a time segment represented by a collection other than the one that
* If RAUP needs to send it to a time segment represented by a collection other than the one that
the client chose to communicate with, then it will do so using mechanisms shared with DUP.
Once the document is forwarded to the correct collection (i.e., the correct TRA time segment), it skips directly to
DUP on the target collection and continues normally, potentially being routed again to the correct shard & replica
@@ -130,67 +127,71 @@ TRUP first reads TRA configuration from the alias properties when it is initiali
passes through to DUP. DUP does its normal collection-level processing that may involve routing the document
to another shard & replica.
* If the time stamp on the document is more recent than the most recent TRA segment, then a new collection needs to be
* If the timestamp on the document is more recent than the most recent TRA segment, then a new collection needs to be
added at the front of the TRA.
TRUP will create this collection, add it to the alias and then forward the document to the collection it just created.
RAUP will create this collection, add it to the alias, and then forward the document to the collection it just created.
This can happen recursively if more than one collection needs to be created.
+
Each time a new collection is added, the oldest collections in the TRA are examined for possible deletion, if that has
been configured.
All this happens synchronously, potentially adding seconds to the update request and indexing latency.
+
If `router.preemptiveCreateMath` is configured and if the document arrives within this window then it will occur
asynchronously.
asynchronously. See <<collections-api.adoc#time-routed-alias-parameters,Time Routed Alias Parameters>> for more information.
Any other type of update like a commit or delete is routed by TRUP to all collections.
Any other type of update like a commit or delete is routed by RAUP to all collections.
Generally speaking, this is not a performance concern. When Solr receives a delete or commit wherein nothing is deleted
or nothing needs to be committed, then it's pretty cheap.
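To make the decision flow above concrete, the following is an illustrative sketch only — not Solr's actual implementation — assuming the alias's collections are keyed by the start instant of their time windows:

[source,java]
----
import java.time.Instant;
import java.util.Map;
import java.util.NavigableMap;

class TraRoutingSketch {
  /**
   * Returns the target collection for a document timestamp, or null when a
   * newer head collection must first be created (and the lookup retried).
   */
  static String route(Instant docTime, NavigableMap<Instant, String> byStart,
                      Instant headWindowEnd) {
    if (!docTime.isBefore(headWindowEnd)) {
      return null; // beyond the newest segment: grow the TRA first, then retry
    }
    Map.Entry<Instant, String> e = byStart.floorEntry(docTime);
    if (e == null) {
      throw new IllegalArgumentException("timestamp precedes the oldest collection");
    }
    return e.getValue(); // newest collection whose window starts at or before docTime
  }
}
----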
==== Limitations & Assumptions
=== Limitations & Assumptions
* Only *time* routed aliases are supported. If you instead have some other sequential number, you could fake it
* Only *time* routed aliases are supported. If you instead have some other sequential number, you could fake it
as a time (e.g., convert to a timestamp assuming some epoch and increment).
+
The smallest possible interval is one second.
No other routing scheme is supported, although this feature was developed with considerations that it could be
extended/improved to other schemes.
* The underlying collections form a contiguous sequence without gaps. This will not be suitable when there are
large gaps in the underlying data, as Solr will insist that there be a collection for each increment. This
is due in part on Solr calculating the end time of each interval collection based on the timestamp of
* The underlying collections form a contiguous sequence without gaps. This will not be suitable when there are
large gaps in the underlying data, as Solr will insist that there be a collection for each increment. This
is due in part to Solr calculating the end time of each interval collection based on the timestamp of
the next collection, since it is otherwise not stored in any way.
* Avoid sending updates to the oldest collection if you have also configured that old collections should be
automatically deleted. It could lead to exceptions bubbling back to the indexing client.
automatically deleted. It could lead to exceptions bubbling back to the indexing client.
== Category Routed Aliases
=== Category Routed Aliases
Starting in Solr 8.1, Category Routed Aliases (CRAs) are a feature to manage aliases and a set of dependent collections
Category Routed Aliases (CRAs) are a feature to manage aliases and a set of dependent collections
based on the value of a single field.
CRAs automatically create new collections, but because the partitioning is on categorical information rather than continuous
numerically based values, there's no logic for automatic deletion. This approach allows for simplified indexing of data
that must be segregated into collections for cluster management or security reasons.
=== How It Works
==== How It Works
First you create a time routed aliases using the <<collections-api.adoc#createalias,CREATEALIAS>> command with some
router settings.
First you create a category routed alias using the <<collections-api.adoc#createalias,CREATEALIAS>> command with the
desired router settings.
Most of the settings are editable at a later time using the <<collections-api.adoc#aliasprop,ALIASPROP>> command.
The alias will be created with a special place-holder collection which will always be named
`myAlias__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA__TEMP`. The first document indexed into the CRA
`myAlias\__CRA__NEW_CATEGORY_ROUTED_ALIAS_WAITING_FOR_DATA\__TEMP`. The first document indexed into the CRA
will create a second collection named `myAlias__CRA__foo` (for a routed field value of `foo`). The second document
indexed will cause the temporary place holder collection to be deleted. Thereafter collections will be created whenever
a new value for the field is encountered.
CAUTION: To guard against runaway collection creation options for limiting the total number of categories, and for
rejecting values that don't match a regular expression are provided (see <<collections-api.adoc#createalias,CREATEALIAS>> for
details). Note that by providing very large or very permissive values for these options you are accepting the risk that
rejecting values that don't match, a regular expression parameter is provided (see <<collections-api.adoc#category-routed-alias-parameters,Category Routed Alias Parameters>> for
details).
+
Note that by providing very large or very permissive values for these options you are accepting the risk that
garbled data could potentially create thousands of collections and bring your cluster to a grinding halt.
Please note that the values (and thus the collection names) are case sensitive. As elsewhere in Solr manipulation and
cleaning of the data is expected to be done by external processes before data is sent to Solr with one exception.
Field values (and thus the collection names) are case sensitive.
As elsewhere in Solr, manipulation and
cleaning of the data is expected to be done by external processes before data is sent to Solr, with one exception.
Throughout Solr there are limitations on the allowable characters in collection names. Any characters other than ASCII
alphanumeric characters (`A-Za-z0-9`), hyphen (`-`) or underscore (`_`) are replaced with an underscore when calculating
the collection name for a category. For a CRA named `myAlias` the following table shows how collection names would be
@@ -229,23 +230,24 @@ Unlike time routed aliases, there is no way to predict the next value so such pa
There is no automated means of removing a category. If a category needs to be removed from a CRA
the following procedure is recommended:
// TODO: This should have example instructions
. Ensure that no documents with the value corresponding to the category to be removed will be sent
either by stopping indexing or by fixing the incoming data stream.
. Modify the alias definition in zookeeper, removing the collection corresponding to the category.
. Modify the alias definition in ZooKeeper, removing the collection corresponding to the category.
. Delete the collection corresponding to the category. Note that if the collection is not removed
from the alias first, this step will fail.
=== Limitations & Assumptions
==== Limitations & Assumptions
* CRAs are presently unsuitable for non-english data values due to the limits on collection names.
This can be worked around by duplicating the route value to a *_url safe_* base 64 encoded field
* CRAs are presently unsuitable for non-English data values due to the limits on collection names.
This can be worked around by duplicating the route value to a *_url safe_* Base64-encoded field
and routing on that value instead.
* The check for the __CRA__ infix is independent of the regular expression validation and occurs after
the name of the collection to be created has been calculated. It may not be avoided and is necessary
to support future features.
== Improvement Possibilities
=== Improvement Possibilities
Routed aliases are a relatively new feature of SolrCloud that can be expected to be improved.
Some _potential_ areas for improvement that _are not implemented yet_ are:
@@ -255,11 +257,11 @@ Some _potential_ areas for improvement that _are not implemented yet_ are:
* *TRAs*: Ways to automatically optimize (or reduce the resources of) older collections that aren't expected to receive more
updates, and might have less search demand.
* *CRAs*: Intrinsic support for non-english text via base64 encoding
* *CRAs*: Intrinsic support for non-English text via Base64 encoding.
* *CRAs*: Supply an initial list of values for cases where these are known beforehand to reduce pauses during indexing
* *CRAs*: Supply an initial list of values for cases where these are known beforehand to reduce pauses during indexing.
* CloudSolrClient could route documents to the correct collection based on the route value instead of always picking the
* `CloudSolrClient` could route documents to the correct collection based on the route value instead of always picking the
latest/first.
* Presently only updates are routed and queries are distributed to all collections in the alias, but future
@@ -275,3 +277,14 @@ Some _potential_ areas for improvement that _are not implemented yet_ are:
create more collections than expected during initial testing. Removing them after such events is overly tedious.
As always, patches and pull requests are welcome!
== Collection Commands and Aliases
Starting with version 8.1 SolrCloud supports using alias names in collection commands where normally a
collection name is expected. This works only when the following criteria are satisfied:
* an alias must not refer to more than one collection
* an alias must not refer to a <<Routed Aliases,Routed Alias>> (see above)
If all criteria are satisfied then the command will resolve alias names and operate on the collections the aliases
refer to as if it was invoked with the collection names instead. Otherwise the command will not be executed and
an exception will be thrown.
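As a hedged example (`client` is a `CloudSolrClient` as in the earlier sketch, and the names are placeholders), a RELOAD addressed to a single-collection, non-routed alias resolves transparently:

[source,java]
----
// "reports" resolves to the single collection reports_2019, so this
// reloads reports_2019 exactly as if it had been named directly.
CollectionAdminRequest.reloadCollection("reports").process(client);
----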

View File

@@ -25,7 +25,7 @@ An audit trail (also called audit log) is a security-relevant chronological reco
== Configuration in security.json
Audit logging is configured in `security.json` under the `auditlogging` key.
The example `security.json` below configures synchronous audit logging to Solr default log file.
The example `security.json` below configures synchronous audit logging to Solr default log file.
[source,json]
----
@@ -55,7 +55,7 @@ By default any AuditLogger plugin configured will log asynchronously in the back
The defaults are `async: true`, `blockAsync: false`, `queueSize: 4096`. The default for `numThreads` is 2, or if the server has more than 4 CPU-cores then we use CPU-cores/2.
[#audit-event-types]
=== Event types
=== Event Types
These are the event types triggered by the framework:
[%header,format=csv,separator=;]
@@ -71,9 +71,9 @@ COMPLETED;Request completed successfully
ERROR;Request was not executed due to an error
|===
By default only the final event types `REJECTED`, `ANONYMOUS_REJECTED`, `UNAUTHORIZED`, `COMPLETED` and `ERROR` are logged. What eventTypes are logged can be configured with the `eventTypes` configuration parameter.
By default only the final event types `REJECTED`, `ANONYMOUS_REJECTED`, `UNAUTHORIZED`, `COMPLETED` and `ERROR` are logged. What eventTypes are logged can be configured with the `eventTypes` configuration parameter.
=== Muting certain events
=== Muting Certain Events
The configuration parameter `muteRules` lets you mute logging for certain events. You may specify multiple rules and combination of rules that will cause muting. You can mute by request type, username, collection name, path, request parameters or IP address. We'll explain through examples:
The below example will mute logging for all `SEARCH` requests as well as all requests made by user `johndoe` or from IP address `192.168.0.10`:
@@ -95,7 +95,7 @@ A mute rule may also be a list, in which case all must be true for muting to ha
{
"auditlogging":{
"class": "solr.SolrLogAuditLoggerPlugin"
"muteRules": [
"muteRules": [
"ip:192.168.0.10", <1>
[ "path:/admin/collections", "param:action=LIST" ], <2>
[ "path:/admin/collections", "param:collection=test" ] <3>
@@ -105,7 +105,7 @@ A mute rule may also be a list, in which case all must be true for muting to ha
----
<1> The first will mute all events from client IP `192.168.0.10`
<2> The second rule will mute collection admin requests with action=LIST
<2> The second rule will mute collection admin requests with `action=LIST`
<3> The third rule will mute collection admin requests for the collection named `test`
Note how you can mix single string rules with lists of rules that must all match:
@@ -115,12 +115,12 @@ Note how you can mix single string rules with lists of rules that must all match
* `type:<request-type>` (request-type by name: `ADMIN`, `SEARCH`, `UPDATE`, `STREAMING`, `UNKNOWN`)
* `collection:<collection-name>` (collection by name)
* `user:<userid>` (user by userid)
* `path:</path/to/handler>` (request path relative to `/solr` or for search/update requests relative to collection. Path is prefix matched, i.e. `/admin` will mute any sub path as well.)
* `path:</path/to/handler>` (request path relative to `/solr` or for search/update requests relative to collection. Path is prefix matched, i.e., `/admin` will mute any sub path as well.)
* `ip:<ip-address>` (IPv4-address)
* `param:<param>=<value>` (request parameter)
* `param:<param>=<value>` (request parameter)
=== Chaining multiple loggers
=== Chaining Multiple Loggers
Using the `MultiDestinationAuditLogger` you can configure multiple audit logger plugins in a chain, to log to multiple destinations, as follows:
[source,json]
@@ -139,9 +139,9 @@ Using the `MultiDestinationAuditLogger` you can configure multiple audit logger
----
== Metrics
AuditLoggerPlugins record metrics about count and timing of log requests, as well as queue size for async loggers. The metrics keys are all recorded on the `SECURITY` category, and each metric name are prefixed with a scope of `/auditlogging` and the class name of the logger, e.g. `SolrLogAuditLoggerPlugin`. The individual metrics are:
AuditLoggerPlugins record metrics about count and timing of log requests, as well as queue size for async loggers. The metrics keys are all recorded on the `SECURITY` category, and each metric name is prefixed with a scope of `/auditlogging` and the class name of the logger, e.g., `SolrLogAuditLoggerPlugin`. The individual metrics are:
* `count` (type: meter. Records number and rate of audit logs done)
* `count` (type: meter. Records number and rate of audit logs done)
* `errors` (type: meter. Records number and rate of errors)
* `lost` (type: meter. Records number and rate of events lost due to queue full and `blockAsync=false`)
* `requestTimes` (type: timer. Records latency and percentiles for logging performance)
@@ -151,4 +151,4 @@ AuditLoggerPlugins record metrics about count and timing of log requests, as wel
* `queuedTime` (type: timer. Records the amount of time events waited in queue. Adding this with requestTimes you get total time from event to logging complete)
* `async` (type: gauge. Tells whether this logger is in async mode)
TIP: If you expect a very high request rate and have a slow audit logger plugin, you may see that the `queueSize` and `queuedTime` metrics increase, and in the worst case start dropping events and see an increase in `lost` count. In this case you may want to increase the `numThreads` setting.
TIP: If you expect a very high request rate and have a slow audit logger plugin, you may see that the `queueSize` and `queuedTime` metrics increase, and in the worst case start dropping events and see an increase in `lost` count. In this case you may want to increase the `numThreads` setting.

View File

@@ -54,7 +54,7 @@ There are several things defined in this file:
<2> The parameter `"blockUnknown":true` means that unauthenticated requests are not allowed to pass through.
<3> A user called 'solr', with a password `'SolrRocks'` has been defined.
<4> We override the `realm` property to display another text on the login prompt.
<5> The parameter `"forwardCredentials":false` means we let Solr's PKI authentication handle distributed requests instead of forwarding the Basic Auth header.
<5> The parameter `"forwardCredentials":false` means we let Solr's PKI authentication handle distributed requests instead of forwarding the Basic Auth header.
<6> The 'admin' role has been defined, and it has permission to edit security settings.
<7> The 'solr' user has been assigned the 'admin' role.
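From SolrJ, credentials can also be attached per request. A minimal sketch, assuming a server secured with the example user above and a hypothetical collection name:

[source,java]
----
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;

try (HttpSolrClient client =
         new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
  QueryRequest req = new QueryRequest(new SolrQuery("*:*"));
  req.setBasicAuthCredentials("solr", "SolrRocks"); // the user defined above
  QueryResponse rsp = req.process(client, "techproducts");
  System.out.println("numFound: " + rsp.getResults().getNumFound());
}
----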
@@ -170,7 +170,7 @@ curl --user solr:SolrRocks http://localhost:8983/api/cluster/security/authentica
====
--
The authentication realm defaults to `solr` and is displayed in the `WWW-Authenticate` HTTP header and in the Admin UI login page. To change the realm, set the `realm` property:
The authentication realm defaults to `solr` and is displayed in the `WWW-Authenticate` HTTP header and in the Admin UI login page. To change the realm, set the `realm` property:
[.dynamic-tabs]
--
@@ -225,7 +225,7 @@ Alternatively, users can use SolrJ's `PreemptiveBasicAuthClientBuilderFactory` t
To enable this feature, users should set the following system property `-Dsolr.httpclient.builder.factory=org.apache.solr.client.solrj.impl.PreemptiveBasicAuthClientBuilderFactory`.
`PreemptiveBasicAuthClientBuilderFactory` allows applications to provide credentials in two different ways:
. The `basicauth` system property can be passed, containing the credentials directly (e.g. `-Dbasicauth=username:password`). This option is straightforward, but may expose the credentials in the command line, depending on how they're set.
. The `basicauth` system property can be passed, containing the credentials directly (e.g., `-Dbasicauth=username:password`). This option is straightforward, but may expose the credentials in the command line, depending on how they're set.
. The `solr.httpclient.config` system property can be passed, containing a path to a properties file holding the credentials. Inside this file the username and password can be specified as `httpBasicAuthUser` and `httpBasicAuthPassword`, respectively.
+
[source,bash]

View File

@@ -43,7 +43,7 @@ image::images/cloud-screens/cloud-tree.png[image,width=487,height=250]
As an aid to debugging, the data shown in the "Tree" view can be exported locally using the following command: `bin/solr zk ls -r /`.
== ZK Status View
The "ZK Status" view gives an overview over the ZooKeeper servers or ensemble used by Solr. It lists whether running in `standalone` or `ensemble` mode, shows how many zookeepers are configured, and then displays a table listing detailed monitoring status for each of the zookeepers, including who is the leader, configuration parameters and more.
The "ZK Status" view gives an overview over the ZooKeeper servers or ensemble used by Solr. It lists whether running in `standalone` or `ensemble` mode, shows how many ZooKeeper nodes are configured, and then displays a table listing detailed monitoring status for each node, including who is the leader, configuration parameters, and more.
image::images/cloud-screens/cloud-zkstatus.png[image,width=512,height=509]

View File

@@ -87,7 +87,7 @@ A `false` value makes the results of a collection creation predictable and gives
This parameter is ignored if `createNodeSet` is not also specified.
`collection.configName`::
Defines the name of the configuration (which *must already be stored in ZooKeeper*) to use for this collection. If not provided, Solr will use the configuration of `_default` configSet to create a new (and mutable) configSet named `<collectionName>.AUTOCREATED` and will use it for the new collection. When such a collection (that uses a copy of the _default configset) is deleted, the autocreated configset is not deleted by default.
Defines the name of the configuration (which *must already be stored in ZooKeeper*) to use for this collection. If not provided, Solr will use the configuration of `_default` configset to create a new (and mutable) configset named `<collectionName>.AUTOCREATED` and will use it for the new collection. When such a collection (that uses a copy of the _default configset) is deleted, the autocreated configset is not deleted by default.
`router.field`::
If this parameter is specified, the router will look at the value of the field in an input document to compute the hash and identify a shard instead of looking at the `uniqueKey` field. If the field specified is null in the document, the document will be rejected.
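A hedged SolrJ sketch of creating such a collection (the collection, configset, and field names are placeholders, and `client` is an existing `CloudSolrClient`):

[source,java]
----
// Route documents by the value of route_s rather than by the uniqueKey field.
CollectionAdminRequest.createCollection("logs", "_default", 2, 1)
    .setRouterField("route_s")
    .process(client);
----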
@@ -204,7 +204,7 @@ The attributes that can be modified are:
See the <<create,CREATE action>> section above for details on these attributes.
[[readonlymode]]
==== Read-only mode
==== Read-Only Mode
Setting the `readOnly` attribute to `true` puts the collection in read-only mode,
in which any index update requests are rejected. Other collection-level actions (e.g., adding,
removing, or moving replicas) are still available in this mode.
@@ -225,7 +225,7 @@ Removing the `readOnly` property or setting it to false enables the
processing of updates and reloads the collection.
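A sketch of toggling the mode from SolrJ, assuming the `modifyCollection` helper and an existing `client` (the collection name is a placeholder):

[source,java]
----
import java.util.Collections;

// Put the collection into read-only mode...
CollectionAdminRequest.modifyCollection("myColl",
    Collections.singletonMap("readOnly", true)).process(client);
// ...and later re-enable updates.
CollectionAdminRequest.modifyCollection("myColl",
    Collections.singletonMap("readOnly", false)).process(client);
----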
[[reindexcollection]]
== REINDEXCOLLECTION: Re-index a Collection
== REINDEXCOLLECTION: Re-Index a Collection
`/admin/collections?action=REINDEXCOLLECTION&name=_name_`
@@ -1693,7 +1693,6 @@ http://localhost:8983/solr/admin/collections?action=COLSTATUS&collection=getting
"total": 49153,
"postings [PerFieldPostings(segment=_i formats=1)]": {
"total": 31023,
...
"fields": {
"dc": {
"flags": "I-----------",
@@ -1713,11 +1712,10 @@ http://localhost:8983/solr/admin/collections?action=COLSTATUS&collection=getting
"dc.date": {
"flags": "-Dsrn-------:1:1:8",
"schemaType": "pdates"
},
...
}
}}}}}}}}}}}
----
[[migrate]]
== MIGRATE: Migrate Documents to Another Collection
@@ -2651,7 +2649,7 @@ Backs up Solr collections and associated configurations to a shared filesystem -
`/admin/collections?action=BACKUP&name=myBackupName&collection=myCollectionName&location=/path/to/my/shared/drive`
The BACKUP command will backup Solr indexes and configurations for a specified collection. The BACKUP command takes one copy from each shard for the indexes. For configurations, it backs up the configSet that was associated with the collection and metadata.
The BACKUP command will back up Solr indexes and configurations for a specified collection. The BACKUP command takes one copy from each shard for the indexes. For configurations, it backs up the configset that was associated with the collection and metadata.
=== BACKUP Parameters
@@ -2678,7 +2676,7 @@ The RESTORE operation will create a collection with the specified name in the co
The collection created will have the same number of shards and replicas as the original collection, preserving routing information, etc. Optionally, you can override some parameters documented below.
While restoring, if a configSet with the same name exists in ZooKeeper then Solr will reuse that, or else it will upload the backed up configSet in ZooKeeper and use that.
While restoring, if a configset with the same name exists in ZooKeeper then Solr will reuse that, or else it will upload the backed up configset in ZooKeeper and use that.
You can use the collection <<createalias,CREATEALIAS>> command to make sure clients don't need to change the endpoint to query or index against the newly restored collection.

View File

@@ -58,9 +58,9 @@ Note that this command is the only one of the Core Admin API commands that *does
====
Your CREATE call must be able to find a configuration, or it will not succeed.
When you are running SolrCloud and create a new core for a collection, the configuration will be inherited from the collection. Each collection is linked to a configName, which is stored in ZooKeeper. This satisfies the config requirement. There is something to note, though if you're running SolrCloud, you should *NOT* be using the CoreAdmin API at all. Use the <<collections-api.adoc#collections-api,Collections API>>.
When you are running SolrCloud and create a new core for a collection, the configuration will be inherited from the collection. Each collection is linked to a configName, which is stored in ZooKeeper. This satisfies the configuration requirement. There is something to note, though: if you're running SolrCloud, you should *NOT* use the CoreAdmin API at all. Use the <<collections-api.adoc#collections-api,Collections API>>.
When you are not running SolrCloud, if you have <<config-sets.adoc#config-sets,Config Sets>> defined, you can use the configSet parameter as documented below. If there are no configsets, then the `instanceDir` specified in the CREATE call must already exist, and it must contain a `conf` directory which in turn must contain `solrconfig.xml`, your schema (usually named either `managed-schema` or `schema.xml`), and any files referenced by those configs.
When you are not running SolrCloud, if you have <<config-sets.adoc#config-sets,Config Sets>> defined, you can use the `configSet` parameter as documented below. If there are no configsets, then the `instanceDir` specified in the CREATE call must already exist, and it must contain a `conf` directory which in turn must contain `solrconfig.xml`, your schema (usually named either `managed-schema` or `schema.xml`), and any files referenced by those configs.
The config and schema filenames can be specified with the `config` and `schema` parameters, but these are expert options. One thing you could do to avoid creating the `conf` directory is use `config` and `schema` parameters that point at absolute paths, but this can lead to confusing configurations unless you fully understand what you are doing.
====
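As a concrete illustration for standalone mode, here is a short SolrJ sketch of a CREATE call; the core name, instance directory, and base URL are assumptions for the example:

[source,java]
----
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;

public class CreateCoreSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // The instanceDir must already exist and contain a conf/ directory.
      CoreAdminRequest.createCore("mycore", "/var/solr/data/mycore", client);
    }
  }
}
----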

View File

@ -61,7 +61,7 @@ The Document Builder provides a wizard-like interface to enter fields of a docum
The File Upload option allows choosing a prepared file and uploading it. If using `/update` for the Request-Handler option, you will be limited to XML, CSV, and JSON.
Other document types (e.g Word, PDF, etc.) can be indexed using the ExtractingRequestHandler (aka, Solr Cell). You must modify the RequestHandler to `/update/extract`, which must be defined in your `solrconfig.xml` file with your desired defaults. You should also add `&literal.id` shown in the "Extracting Request Handler Params" field so the file chosen is given a unique id.
Other document types (e.g., Word, PDF, etc.) can be indexed using the ExtractingRequestHandler (aka, Solr Cell). You must modify the RequestHandler to `/update/extract`, which must be defined in your `solrconfig.xml` file with your desired defaults. You should also add `&literal.id` shown in the "Extracting Request Handler Params" field so the file chosen is given a unique id.
More information can be found at: <<uploading-data-with-solr-cell-using-apache-tika.adoc#uploading-data-with-solr-cell-using-apache-tika,Uploading Data with Solr Cell using Apache Tika>>
== Solr Command

View File

@ -29,7 +29,7 @@ You can find `solr.xml` in your `$SOLR_HOME` directory (usually `server/solr`) i
<solr>
<int name="maxBooleanClauses">${solr.max.booleanClauses:1024}</int>
<solrcloud>
<str name="host">${host:}</str>
<int name="hostPort">${jetty.port:8983}</int>
@ -92,7 +92,7 @@ This attribute, when set to `true`, ensures that the multiple cores pointing to
Defines how many cores with `transient=true` can be loaded before swapping the least recently used core for a new core.
`configSetBaseDir`::
The directory under which configSets for Solr cores can be found. Defaults to `$SOLR_HOME/configsets`.
The directory under which configsets for Solr cores can be found. Defaults to `$SOLR_HOME/configsets`.
[[global-maxbooleanclauses]]
`maxBooleanClauses`::

View File

@ -56,7 +56,7 @@ With the exception of in-place updates, the whole block must be updated or delet
=== Rudimentary Root-only Schemas
These schemas do not contain any other nested related fields apart from `\_root_`.
Many schemas in existence are this way simply because default configSets are this way, even if the application isn't using nested documents.
Many schemas in existence are this way simply because default configsets are this way, even if the application isn't using nested documents.
If an application uses nested documents with such a schema, keep in mind that some related features aren't as effective since there is less information. Mainly, the <<searching-nested-documents.adoc#child-doc-transformer,[child]>> transformer returns matching children in a flat list (not nested), attached to the parent using the special field name `\_childDocuments_`.
With such a schema, typically you should have a field that differentiates a root doc from any nested children.
@ -150,4 +150,4 @@ Do *not* add a root document that has the same ID of a child document. _This wi
To delete a nested document, you can delete it by the ID of the root document.
If you try to use an ID of a child document, nothing will happen since only root document IDs are considered.
If you use Solr's delete-by-query APIs, you *have to be careful* to ensure that no children remain of any documents that are being deleted. _Doing otherwise will violate integrity assumptions that Solr expects._
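For the safe path, a minimal SolrJ sketch that deletes a whole nested tree by its root document ID (the collection name and ID are assumptions for the example):

[source,java]
----
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class DeleteNestedSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // Per the text above: delete using the root document's ID.
      client.deleteById("mycollection", "ROOT-DOC-ID");
      client.commit("mycollection");
    }
  }
}
----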

View File

@ -23,11 +23,11 @@ Queries and filters provided in JSON requests can be specified using a rich, pow
== Query DSL Structure
The JSON Request API accepts query values in three different formats:
* A valid <<the-standard-query-parser.adoc#the-standard-query-parser,query string>> that uses the default `deftype` (`lucene`, in most cases). e.g. `title:solr`.
* A valid <<the-standard-query-parser.adoc#the-standard-query-parser,query string>> that uses the default `deftype` (`lucene`, in most cases). e.g., `title:solr`.
* A valid <<local-parameters-in-queries.adoc#local-parameters-in-queries,local parameters query string>> that specifies its `deftype` explicitly. e.g. `{!dismax qf=title}solr`.
* A valid <<local-parameters-in-queries.adoc#local-parameters-in-queries,local parameters query string>> that specifies its `deftype` explicitly. e.g., `{!dismax qf=title}solr`.
* A valid JSON object with the name of the query parser and any relevant parameters. e.g. `{ "lucene": {"df":"title", "query":"solr"}}`.
* A valid JSON object with the name of the query parser and any relevant parameters. e.g., `{ "lucene": {"df":"title", "query":"solr"}}`.
** The top level "query" JSON block generally only has a single property representing the name of the query parser to use. The value for the query parser property is a child block containing any relevant parameters as JSON properties. The whole structure is analogous to a "local-params" query string. The query itself (often represented in local params using the name `v`) is specified with the key `query` instead.
All of these syntaxes can be used to specify queries for either the JSON Request API's `query` or `filter` properties.
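For illustration, the same three formats expressed through SolrJ's `JsonQueryRequest`; a sketch, where the client URL and collection name are assumptions:

[source,java]
----
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.json.JsonQueryRequest;

public class QueryDslSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // 1. Plain query string using the default deftype.
      JsonQueryRequest byString = new JsonQueryRequest().setQuery("title:solr");
      // 2. Local-params query string with an explicit deftype.
      JsonQueryRequest byLocalParams =
          new JsonQueryRequest().setQuery("{!dismax qf=title}solr");
      // 3. Expanded JSON object naming the query parser.
      Map<String, Object> lucene = new HashMap<>();
      lucene.put("df", "title");
      lucene.put("query", "solr");
      JsonQueryRequest byJson =
          new JsonQueryRequest().setQuery(Collections.singletonMap("lucene", lucene));
      byJson.process(client, "techproducts");
    }
  }
}
----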

View File

@ -281,7 +281,7 @@ Solr uses the *Noggit* JSON parser in its request API. Noggit is capable of mor
* Multi-line ("C style") comments can be inserted using `/\*` and `*/`
* strings can be single-quoted
* special characters can be backslash-escaped
* trailing (extra) commas are silently ignored (e.g. `[9,4,3,]`)
* trailing (extra) commas are silently ignored (e.g., `[9,4,3,]`)
* nbsp (non-break space, \u00a0) is treated as whitespace.
== Debugging

View File

@ -35,8 +35,9 @@ The simplest possible `security.json` for registering the plugin without configu
The plugin will NOT block anonymous traffic in this mode, since the default for `blockUnknown` is false. It is then possible to start configuring the plugin using REST API calls, as described below.
== Configuration parameters
== Configuration Parameters
//TODO: standard is not to put parameters in tables but use labeled lists instead
[%header,format=csv,separator=;]
|===
Key ; Description ; Default
@ -51,16 +52,16 @@ iss ; Validates that the `iss` (issuer) claim equals this strin
aud ; Validates that the `aud` (audience) claim equals this string ; If `clientId` is configured, require `aud` to match it
requireSub ; Makes `sub` (subject) claim mandatory ; `true`
requireExp ; Makes `exp` (expiry time) claim mandatory ; `true`
algWhitelist ; JSON array with algorithms to accept: `HS256`, `HS384`, `HS512`, `RS256`, `RS384`, `RS512`, `ES256`, `ES384`, `ES512`, `PS256`, `PS384`, `PS512`, `none` ; Default is to allow all algorithms
jwkCacheDur ; Duration of JWK cache in seconds ; `3600` (1 hour)
principalClaim ; What claim id to pull principal from ; `sub`
claimsMatch ; JSON object of claims (key) that must match a regular expression (value). Example: `{ "foo" : "A|B" }` will require the `foo` claim to be either "A" or "B". ; (none)
adminUiScope ; Define what scope is requested when logging in from Admin UI ; If not defined, the first scope from `scope` parameter is used
authorizationEndpoint; The URL for the Id Provider's authorization endpoint ; Auto configured if `wellKnownUrl` is provided
redirectUris ; Valid location(s) for redirect after external authentication. Takes a string or array of strings. Must be the base URL of Solr, e.g. https://solr1.example.com:8983/solr/ and must match the list of redirect URIs registered with the Identity Provider beforehand. ; Defaults to empty list, i.e. any node is assumed to be a valid redirect target.
redirectUris ; Valid location(s) for redirect after external authentication. Takes a string or array of strings. Must be the base URL of Solr, e.g., https://solr1.example.com:8983/solr/ and must match the list of redirect URIs registered with the Identity Provider beforehand. ; Defaults to empty list, i.e., any node is assumed to be a valid redirect target.
|===
== More configuration examples
== More Configuration Examples
=== With JWK URL
To start enforcing authentication for all users, requiring a valid JWT in the `Authorization` header, you need to configure the plugin with one or more https://tools.ietf.org/html/rfc7517[JSON Web Key]s (JWK). This is a JSON document containing the key used to sign/encrypt the JWT. It could be a symmetric or asymmetric key. The JWK can either be fetched (and cached) from an external HTTPS endpoint or specified directly in `security.json`. Below is an example of the former:
@ -75,8 +76,8 @@ To start enforcing authentication for all users, requiring a valid JWT in the `A
}
----
=== With Admin UI support
The next example shows configuring using https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] with a well-known URI for automatic configuration of many common settings, including the ability to use the Admin UI with an OpenID Connect enabled Identity Provider.
=== With Admin UI Support
[source,json]
----
@ -89,11 +90,11 @@ The next example shows configuring using https://openid.net/specs/openid-connect
"redirectUri": "https://my.solr.server:8983/solr/"
}
}
----
In this case, `jwkUrl`, `iss` and `authorizationEndpoint` will be automatically configured from the fetched configuration.
=== Complex example
=== Complex Example
Let's look at a more complex configuration, this time with a static embedded JWK:
[source,json]
@ -137,7 +138,7 @@ Let's comment on this config:
All properties mentioned above can be set or changed using the Config Edit API. You can thus start with a simple configuration with only `class` configured and then configure the rest using the API.
=== Set a config Property
=== Set a Configuration Property
Set properties for the authentication plugin. Each of the configuration keys in the table above can be used as parameter keys for the `set-property` command.
@ -168,7 +169,7 @@ curl http://localhost:8983/api/cluster/security/authentication -H 'Content-type:
Insert a valid JWT access token in compact serialization format (`xxx.yyy.zzz` above) to authenticate with Solr once the plugin is active.
== Using clients with JWT Auth
== Using Clients with JWT Auth
[#jwt-soljr]
=== SolrJ
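One possible approach for SolrJ clients (a sketch, not an API documented by this page) is to register an Apache HttpClient request interceptor that attaches the token to every request; the token value and base URL are assumptions:

[source,java]
----
import org.apache.http.HttpRequestInterceptor;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class JwtSolrJSketch {
  public static void main(String[] args) throws Exception {
    String jwt = "xxxxxx.xxxxxx.xxxxxx"; // compact-serialized access token
    HttpRequestInterceptor bearer = (request, context) ->
        request.addHeader("Authorization", "Bearer " + jwt);
    // Applies to HttpClient instances subsequently created by SolrJ.
    HttpClientUtil.addRequestInterceptor(bearer);
    try (HttpSolrClient client = new HttpSolrClient.Builder(
        "http://localhost:8983/solr/techproducts").build()) {
      System.out.println(client.ping().getStatus());
    }
  }
}
----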
@ -182,13 +183,13 @@ To authenticate with Solr when using the cURL utility, supply a valid JWT access
[source,bash]
----
curl -H "Authorization: Bearer xxxxxx.xxxxxx.xxxxxx" http://localhost:8983/solr/admin/info/system
curl -H "Authorization: Bearer xxxxxx.xxxxxx.xxxxxx" http://localhost:8983/solr/admin/info/system
----
=== Admin UI
When this plugin is enabled, users will be redirected to a login page in the Admin UI once they attempt to perform a restricted action. The page has a button that users can click to be redirected to the Identity Provider's login page. Once authenticated, the user will be redirected back to the Solr Admin UI at the last known location. The session lasts as long as the JWT token expiry time and is valid for one Solr server only. That means you have to log in again when navigating to another Solr node. There is also a logout menu in the left column where the user can explicitly log out.
== Using the Solr Control Script with JWT Auth
The control script (`bin/solr`) does not currently support JWT Auth.

View File

@ -68,12 +68,12 @@ See the section <<solrcloud-autoscaling.adoc#solrcloud-autoscaling,SolrCloud Aut
== Configuration and Default Changes
=== New Default ConfigSet
Several changes have been made to configSets that ship with Solr; not only their content but how Solr behaves in regard to them:
Several changes have been made to configsets that ship with Solr; not only their content but how Solr behaves in regard to them:
* The `data_driven_configset` and `basic_configset` have been removed, and replaced by the `_default` configset. The `sample_techproducts_configset` also remains, and is designed for use with the example documents shipped with Solr in the `example/exampledocs` directory.
* When creating a new collection, if you do not specify a configSet, the `_default` will be used.
** If you use SolrCloud, the `_default` configSet will be automatically uploaded to ZooKeeper.
** If you use standalone mode, the instanceDir will be created automatically, using the `_default` configSet as it's basis.
* When creating a new collection, if you do not specify a configset, the `_default` will be used.
** If you use SolrCloud, the `_default` configset will be automatically uploaded to ZooKeeper.
** If you use standalone mode, the instanceDir will be created automatically, using the `_default` configset as its basis.
=== Schemaless Improvements

View File

@ -132,7 +132,7 @@ Both these actions were and still are problematic. In-place-updates are safe tho
If you want to delete certain child documents and if you know they don't themselves have nested children
then you must do so with a delete-by-query technique.
* Solr has a new field in the `\_default` configSet, called `_nest_path_`. This field stores the path of the document
* Solr has a new field in the `\_default` configset, called `_nest_path_`. This field stores the path of the document
in the hierarchy for non-root documents.
See the sections <<indexing-nested-documents.adoc#indexing-nested-documents,Indexing Nested Documents>> and
@ -258,7 +258,7 @@ curl -X POST -H 'Content-type:application/json' --data-binary '
* A new command-line option is available via `bin/solr autoscaling` to calculate autoscaling policy suggestions and diagnostic information outside of the running Solr cluster. This option can use the existing autoscaling policy, or test the impact of a new one from a file located on the server filesystem.
+
These options have been documented in the section <<solrcloud-autoscaling-policy-preferences.adoc#testing-the-autoscaling-configuration-and-suggestions,Testing the Autoscaling Configuration and Suggestions>>.
These options have been documented in the section <<solrcloud-autoscaling-policy-preferences.adoc#testing-autoscaling-configuration-and-suggestions,Testing Autoscaling Configuration and Suggestions>>.
=== Dependency Updates in 8.0

View File

@ -57,7 +57,7 @@ This registry is returned at `solr.jvm` and includes the following information.
This registry is returned at `solr.node` and includes the following information. When making requests with the <<Metrics API>>, you can specify `&group=node` to limit to only these metrics.
* handler requests (count, timing): collections, info, admin, configSets, etc.
* handler requests (count, timing): collections, info, admin, configsets, etc.
* number of cores (loaded, lazy, unloaded)
=== Core (SolrCore) Registry

View File

@ -1,5 +1,4 @@
= Solr System Requirements
:page-toc: false
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
@ -25,6 +24,7 @@ You can install Solr in any system where a suitable Java Runtime Environment (JR
Solr is tested on several versions of Linux, macOS and Windows.
//TODO: this with the next big section on Java fragments the Java requirements too much. Consider merging sections.
=== Java Requirements
You will need the Java Runtime Environment (JRE) version 1.8 or higher. At a command line, check your Java version like this:
@ -42,61 +42,54 @@ The exact output will vary, but you need to make sure you meet the minimum versi
Some versions of Java VM have bugs that may impact your implementation. To be sure, check the page https://wiki.apache.org/lucene-java/JavaBugs[Lucene Java Bugs].
=== Sources for Java
Java is available from a number of providers. Lucene and Solr regularly test with https://jdk.java.net/[OpenJDK]/Oracle versions of Java. Some are free, others have a cost, some provide security patches and support, others do not. We recommend you read the article https://medium.com/@javachampions/java-is-still-free-2-0-0-6b9aa8d6d244[Java is still free by Java Champions] to help you decide.
Java is available from a number of providers. Lucene and Solr regularly test with https://jdk.java.net/[OpenJDK] and Oracle versions of Java. Some are free, others have a cost, some provide security patches and support, others do not. We recommend you read the article https://medium.com/@javachampions/java-is-still-free-2-0-0-6b9aa8d6d244[Java is still free by Java Champions] to help you decide.
The Lucene project does not endorse any particular provider of Java.
NOTE: While we reference the Java Development Kit (JDK) on this page, any Java Runtime Environment (JRE) associated with the referenced JDKs is acceptable.
== Java and Lucene/Solr combinations
Each Lucene/Solr release has an extensively tested minimum Java version. For instance the minimum Java version for Solr 8 is Java 8. This section is intended to provide guidance when running Lucene/Solr with a more recent Java version than the minimum specified.
== Java and Lucene/Solr Combinations
=== Executive summary
* OpenJDK/Oracle distribution are tested extensively and will continue to be tested going forward.
** Distributions of Java from other source are not regularly tested by our testing infrastructure, therefore you must test Java from those sources in your environment.
** For the purposes of Lucene and Solr, Oracle's paid version and OpenJDK are are identical.
Each Lucene/Solr release has an extensively tested minimum Java version. For instance the minimum Java version for Solr 8 is Java 8. This section provides guidance when running Lucene/Solr with a more recent Java version than the minimum specified.
* OpenJDK and Oracle Java distributions are tested extensively and will continue to be tested going forward.
** Distributions of Java from other sources are not regularly tested by our testing infrastructure, therefore you must test Java from those sources in your environment.
** For the purposes of Lucene and Solr, Oracle's Java and OpenJDK are identical.
* Upgrading Java is not required, with the understanding that no Java bugs will be addressed unless you are using a version of Java that provides LTS.
* Java 8 has been extensively tested both in the field and by automated tests through Solr 8. LTS for Java 8 is provided by some sources, see https://medium.com/@javachampions/java-is-still-free-2-0-0-6b9aa8d6d244[Java is still free].
* Our testing infrastructure continuously tests with the minimum and greater versions of Java for each development branch.
* Java 9 and 10 have no Long Term Support (LTS). For this reason, Java 11 is preferred over 9 or 10 when upgrading Java.
* Java 8 has been extensively tested by both automated tests and users through Solr 8. Long Term Support (LTS) for Java 8 is provided by some sources, see https://medium.com/@javachampions/java-is-still-free-2-0-0-6b9aa8d6d244[Java is still free].
* The project's testing infrastructure continuously tests with the minimum and greater versions of Java for each development branch.
* Java 9 and 10 have no LTS. For this reason, Java 11 is preferred over 9 or 10 when upgrading Java.
* For specific questions the http://lucene.apache.org/solr/community.html#mailing-lists-irc[Solr User's List] is a great resource.
The rest of this page summarizes the Lucene/Solr community's testing experience for Lucene/Solr releases.
=== How we test Solr/Lucene
=== Project Testing of Java-Solr Combinations
Solr and Lucene run a continuous integration model, running automated unit and integration tests using several versions of Java. In addition, some organizations also maintain their own test infrastructure and feed their results back to the community.
Our continuous testing is against the two code lines under active development, Solr 8.x and the future Solr 9.0:
* Lucene/Solr 8.x is the current stable branch and will have "point releases", i.e. 8.1, 8.2... until Lucene/Solr 9.0 is released.
** This version is currently tested against Java 8, 9, 10, 11, 12 and (pre-release) 13
* There is also development and testing with the future Lucene/Solr 9.0. There is no planned release date although the cadence has been between 1 and 2 years for major releases.
** This version will require Java 11, so it is currently tested against Java 11, 12 and (pre-release) 13
* Lucene/Solr 7x and earlier are not tested on a continuous basis.
* Lucene/Solr 8.x is the current stable release line and will have "point releases", i.e., 8.1, 8.2, etc. until Lucene/Solr 9.0 is released.
** Solr 8.x is currently tested against Java 8, 9, 10, 11, 12 and (pre-release) 13.
* There is also development and testing with the future Lucene/Solr 9.x release line, which will require Java 11 as a minimum version. This line is currently tested against Java 11, 12 and (pre-release) 13.
* Lucene/Solr 7.x and earlier release lines are not tested on a continuous basis.
=== Released Lucene/Solr and Java Versions
The success rate in our automated tests is similar across all the Java versions tested, with the following caveats.
==== Lucene/Solr prior to 7.0
==== Lucene/Solr Prior to 7.0
* Lucene/Solr 7.0 was the first version that successfully passed our tests using Java 9+. Our best guidance is to avoid Java 9 or later for Lucene/Solr 6 or earlier
* Lucene/Solr 7.0 was the first version that successfully passed our tests using Java 9 and higher. You should avoid Java 9 or later for Lucene/Solr 6.x or earlier.
==== Lucene/Solr 7
==== Lucene/Solr 7.x
* Requires Java 8+
* This version had continuous testing with Java 9, 10, 11, 12 and the pre release version of Java 13. Regular testing stopped when Lucene/Solr 8.0 was released.
* Lucene/Solr 7.0 is the first release of Lucene/Solr that successfully completed our testing with Java 9+.
* Requires Java 8 or higher.
* This version had continuous testing with Java 9, 10, 11, 12 and the pre-release version of Java 13. Regular testing stopped when Lucene/Solr 8.0 was released.
* Hadoop with Java 9+ may not work in all situations, test in your environment.
* Kerberos with Java 9+ may not work in all situations, test in your environment.
* Be sure to test with SSL/TLS and/or authorization enabled in your environment if you require either when using Java 9+.
==== Lucene/Solr 8.x
==== Lucene/Solr 8
* Requires Java 8+
* This version has continuous testing with Java 9, 10, 11, 12 and the pre release version of Java 13.
* There were known issues with Kerberos with Java 9+ that will be addressed in Lucene/Solr 8.1, test in your environment.
* Requires Java 8 or higher.
* This version has continuous testing with Java 9, 10, 11, 12 and the pre-release version of Java 13.
* There were known issues with Kerberos with Java 9+ prior to Solr 8.1. If using 8.0, you should test in your environment.
* Be sure to test with SSL/TLS and/or authorization enabled in your environment if you require either when using Java 9+.

View File

@ -147,9 +147,9 @@ Please choose a configuration for the techproducts collection, available options
_default or sample_techproducts_configs [_default]
----
We've reached another point where we will deviate from the default option. Solr has two sample sets of configuration files (called a _configSet_) available out-of-the-box.
We've reached another point where we will deviate from the default option. Solr has two sample sets of configuration files (called a configset) available out-of-the-box.
A collection must have a configSet, which at a minimum includes the two main configuration files for Solr: the schema file (named either `managed-schema` or `schema.xml`), and `solrconfig.xml`. The question here is which configSet you would like to start with. The `_default` is a bare-bones option, but note there's one whose name includes "techproducts", the same as we named our collection. This configSet is specifically designed to support the sample data we want to use, so enter `sample_techproducts_configs` at the prompt and hit kbd:[enter].
A collection must have a configset, which at a minimum includes the two main configuration files for Solr: the schema file (named either `managed-schema` or `schema.xml`), and `solrconfig.xml`. The question here is which configset you would like to start with. The `_default` is a bare-bones option, but note there's one whose name includes "techproducts", the same as we named our collection. This configset is specifically designed to support the sample data we want to use, so enter `sample_techproducts_configs` at the prompt and hit kbd:[enter].
At this point, Solr will create the collection and again output to the screen the commands it issues.
@ -529,13 +529,13 @@ Solr's schema is a single file (in XML) that stores the details about the fields
Earlier in the tutorial we mentioned copy fields, which are fields made up of data that originated from other fields. You can also define dynamic fields, which use wildcards (such as `*_t` or `*_s`) to dynamically create fields of a specific field type. These types of rules are also defined in the schema.
****
When you initially started Solr in the first exercise, we had a choice of a configSet to use. The one we chose had a schema that was pre-defined for the data we later indexed. This time, we're going to use a configSet that has a very minimal schema and let Solr figure out from the data what fields to add.
When you initially started Solr in the first exercise, we had a choice of a configset to use. The one we chose had a schema that was pre-defined for the data we later indexed. This time, we're going to use a configset that has a very minimal schema and let Solr figure out from the data what fields to add.
The data you're going to index is related to movies, so start by creating a collection named "films" that uses the `_default` configSet:
The data you're going to index is related to movies, so start by creating a collection named "films" that uses the `_default` configset:
`bin/solr create -c films -s 2 -rf 2`
Whoa, wait. We didn't specify a configSet! That's fine, the `_default` is appropriately named, since it's the default and is used if you don't specify one at all.
Whoa, wait. We didn't specify a configset! That's fine, the `_default` is appropriately named, since it's the default and is used if you don't specify one at all.
We did, however, set two parameters `-s` and `-rf`. Those are the number of shards to split the collection across (2) and how many replicas to create (2). This is equivalent to the options we had during the interactive example from the first exercise.
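If you prefer to script this step, an equivalent SolrJ sketch (the ZooKeeper address is an assumption) would be:

[source,java]
----
import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class CreateFilmsSketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
      // _default configset, 2 shards, 2 replicas, mirroring bin/solr create -c films -s 2 -rf 2
      CollectionAdminRequest.createCollection("films", "_default", 2, 2)
          .process(client);
    }
  }
}
----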
@ -573,13 +573,13 @@ http://localhost:7574/solr/admin/collections?action=CREATE&name=films&numShards=
"core":"films_shard1_replica_n2"}}}
----
The first thing the command printed was a warning about not using this configSet in production. That's due to some of the limitations we'll cover shortly.
The first thing the command printed was a warning about not using this configset in production. That's due to some of the limitations we'll cover shortly.
Otherwise, though, the collection should be created. If we go to the Admin UI at http://localhost:8983/solr/#/films/collection-overview we should see the overview screen.
==== Preparing Schemaless for the Films Data
There are two parallel things happening with the schema that comes with the `_default` configSet.
There are two parallel things happening with the schema that comes with the `_default` configset.
First, we are using a "managed schema", which is configured to only be modified by Solr's Schema API. That means we should not hand-edit it so there isn't confusion about which edits come from which source. Solr's Schema API allows us to make changes to fields, field types, and other types of schema rules.
@ -896,7 +896,7 @@ Before you get started, create a new collection, named whatever you'd like. In t
`./bin/solr create -c localDocs -s 2 -rf 2`
Again, as we saw from Exercise 2 above, this will use the `_default` configSet and all the schemaless features it provides. As we noted previously, this may cause problems when we index our data. You may need to iterate on indexing a few times before you get the schema right.
Again, as we saw from Exercise 2 above, this will use the `_default` configset and all the schemaless features it provides. As we noted previously, this may cause problems when we index our data. You may need to iterate on indexing a few times before you get the schema right.
=== Indexing Ideas

View File

@ -448,7 +448,7 @@ Since there is no node that can host a replica for `shard2` without causing a vi
After re-issuing the `SecondCollection` CREATE command, the replica for `shard1` will be placed on "nodeA": it's least loaded, so is tested first, and no policy violation will result from placement there. The `shard2` replica could be placed on any of the 3 nodes, since they're all equally loaded, and the chosen node will remain below its maximum core count after placement. The CREATE command succeeds.
== Testing the autoscaling configuration and suggestions
== Testing Autoscaling Configuration and Suggestions
It's not always easy to predict the impact of autoscaling configuration changes on the
cluster layout. Starting with release 8.1, Solr provides a tool for assessing the impact of
such changes without affecting the state of the target cluster.

View File

@ -132,7 +132,7 @@ An example, to be included under `<config><updateHandler>` in `solrconfig.xml`,
</updateLog>
----
== Other options
== Other Options
In some cases complex updates (such as spatial/shape) may take a very long time to complete. In the default
configuration other updates that fall into the same internal version bucket will wait indefinitely and
eventually these outstanding requests may pile up and lead to thread exhaustion and eventually to

View File

@ -115,9 +115,9 @@ as demonstrated by the examples below.
[NOTE]
====
.\_route_ Param
.\_route_ Parameter
To ensure each nested update is routed to its respective shard,
`\_route_` param must be set to the root document's ID when the
`\_route_` parameter must be set to the root document's ID when the
update does not have that root document.
====

View File

@ -56,12 +56,14 @@ import static org.apache.solr.common.util.Utils.toJSONString;
* Represents a set of conditions in the policy
*/
public class Clause implements MapWriter, Comparable<Clause> {
private static final Set<String> IGNORE_TAGS = new HashSet<>(Arrays.asList(REPLICA, COLLECTION, SHARD, "strict", "type"));
public static final String NODESET = "nodeset";
private static final Set<String> IGNORE_TAGS = new HashSet<>(Arrays.asList(REPLICA, COLLECTION, SHARD, "strict", "type", "put", NODESET));
private final int hashCode;
final boolean hasComputedValue;
final Map<String, Object> original;
final Clause derivedFrom;
private boolean nodeSetPresent = false;
Condition collection, shard, replica, tag, globalTag;
final Replica.Type type;
boolean strict;
@ -70,6 +72,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
this.original = clause.original;
this.hashCode = original.hashCode();
this.type = clause.type;
this.nodeSetPresent = clause.nodeSetPresent;
this.collection = clause.collection;
this.shard = clause.shard;
this.tag = evaluateValue(clause.tag, computedValueEvaluator);
@ -81,7 +84,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
}
// internal use only
Clause(Map<String, Object> original, Condition tag, Condition globalTag, boolean isStrict) {
Clause(Map<String, Object> original, Condition tag, Condition globalTag, boolean isStrict, boolean nodeSetPresent) {
this.hashCode = original.hashCode();
this.original = original;
this.tag = tag;
@ -91,6 +94,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
this.hasComputedValue = false;
this.strict = isStrict;
derivedFrom = null;
this.nodeSetPresent = nodeSetPresent;
}
private Clause(Map<String, Object> m) {
@ -117,7 +121,9 @@ public class Clause implements MapWriter, Comparable<Clause> {
}
this.replica = parse(REPLICA, m);
if (replica.op == WILDCARD) throw new IllegalArgumentException("replica val cannot be null" + toJSONString(m));
m.forEach(this::parseCondition);
this.nodeSetPresent = parseNodeset(m);
m.forEach((s, o) -> parseCondition(s, o, m));
}
if (tag == null)
throw new RuntimeException("Invalid op, must have one and only one tag other than collection, shard,replica " + toJSONString(m));
@ -131,9 +137,33 @@ public class Clause implements MapWriter, Comparable<Clause> {
hasComputedValue = hasComputedValue();
}
private boolean parseNodeset(Map<String, Object> m) {
if (!m.containsKey(NODESET)) return false;
Object o = m.get(NODESET);
if (o instanceof Map) {
Map map = (Map) o;
if (map.size() != 1) {
throwExp(m, "nodeset must only have one and only one key");
}
String key = (String) map.keySet().iterator().next();
parseCondition(key, o, m);
} else if (o instanceof List) {
List l = (List) o;
for (Object it : l) {
if (it instanceof String) continue;
else throwExp(m, "nodeset :[]must have only string values");
}
parseCondition("node", o, m);
} else {
throwExp(m, "invalid value for nodeset, must be an object or a list of String");
}
return true;
}
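// Illustrative 'nodeset' clauses accepted by parseNodeset above (values are
// examples, not defaults). A single-key map form such as
//   {"replica": "<2", "shard": "#EACH", "nodeset": {"sysprop.zone": "east"}}
// delegates to parseCondition with that key, while a list-of-strings form such as
//   {"replica": "#ALL", "nodeset": ["node1:8983_solr", "node2:8983_solr"]}
// is parsed as a multi-valued 'node' condition.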
public Condition getThirdTag() {
return globalTag == null ? tag : globalTag;
}
private void doPostValidate(Condition... conditions) {
for (Condition condition : conditions) {
if (condition == null) continue;
@ -198,12 +228,12 @@ public class Clause implements MapWriter, Comparable<Clause> {
return globalTag == null;
}
void parseCondition(String s, Object o) {
void parseCondition(String s, Object o, Map m) {
if (IGNORE_TAGS.contains(s)) return;
if (tag != null) {
throw new IllegalArgumentException("Only one tag other than collection, shard, replica is possible");
throwExp(m, "Only one tag other than collection, shard, replica is possible");
}
tag = parse(s, singletonMap(s, o));
tag = parse(s, o instanceof Map? (Map<String, Object>) o : singletonMap(s, o));
}
private int compareTypes(Replica.Type t1, Replica.Type t2) {
@ -249,8 +279,8 @@ public class Clause implements MapWriter, Comparable<Clause> {
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Clause)) return false;
Clause that = (Clause)o;
Clause that = (Clause) o;
return Objects.equals(this.original, that.original);
}
//replica value is zero
@ -265,14 +295,14 @@ public class Clause implements MapWriter, Comparable<Clause> {
new SealedClause(this, computedValueEvaluator);
}
Condition parse(String s, Map<String,Object> m) {
Condition parse(String s, Map<String, Object> m) {
Object expectedVal = null;
ComputedType computedType = null;
Object val = m.get(s);
Type varType = VariableBase.getTagType(s);
if (varType.meta.isHidden()) {
throwExp(m,"''{0}'' is not allowed", varType.tagName);
throwExp(m, "''{0}'' is not allowed", varType.tagName);
}
try {
String conditionName = s.trim();
@ -297,12 +327,12 @@ public class Clause implements MapWriter, Comparable<Clause> {
computedType = t;
strVal = changedVal;
if (varType == null || !varType.supportedComputedTypes.contains(computedType)) {
throwExp(m,"''{0}'' is not allowed for variable : ''{1}''",t,conditionName);
throwExp(m, "''{0}'' is not allowed for variable : ''{1}''", t, conditionName);
}
}
}
if (computedType == null && ((String) val).charAt(0) == '#' && !varType.wildCards.contains(val)) {
throwExp(m, "''{0}'' is not an allowed value for ''{1}'', supported value is : {2} ", val, conditionName, varType.wildCards );
throwExp(m, "''{0}'' is not an allowed value for ''{1}'', supported value is : {2} ", val, conditionName, varType.wildCards);
}
operand = varType == null ? operand : varType.getOperand(operand, strVal, computedType);
@ -318,16 +348,16 @@ public class Clause implements MapWriter, Comparable<Clause> {
} catch (IllegalArgumentException iae) {
throw iae;
} catch (Exception e) {
throwExp(m, "Invalid tag : {0} ",s );
throwExp(m, "Invalid tag : {0} ", s);
return null;
}
}
public void throwExp(Map<String, Object> clause, String msg, Object... args) {
throw new IllegalArgumentException("syntax error in clause :"+ toJSONString(clause)+ " , msg: "+ formatString(msg, args));
public static void throwExp(Map clause, String msg, Object... args) {
throw new IllegalArgumentException("syntax error in clause :" + toJSONString(clause) + " , msg: " + formatString(msg, args));
}
private List readListVal(Map m, List val, Type varType, String conditionName) {
private static List readListVal(Map m, List val, Type varType, String conditionName) {
List list = val;
list = (List) list.stream()
.map(it -> varType.validate(conditionName, it, true))
@ -360,7 +390,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
return list;
}
private Operand getOperand(String strVal) {
private static Operand getOperand(String strVal) {
Operand operand;
if (Policy.ANY.equals(strVal) || Policy.EACH.equals(strVal)) operand = WILDCARD;
else if (strVal.startsWith(NOT_EQUAL.operand)) operand = NOT_EQUAL;
@ -456,6 +486,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
public static long addReplicaCountsForNode = 0;
public static long addReplicaCountsForNodeCacheMiss = 0;
public static final String PERSHARD_REPLICAS = Clause.class.getSimpleName() + ".perShardReplicas";
private void addReplicaCountsForNode(ComputedValueEvaluator computedValueEvaluator, ReplicaCount replicaCount, Row node) {
addReplicaCountsForNode++;
@ -537,6 +568,10 @@ public class Clause implements MapWriter, Comparable<Clause> {
public List<Violation> test(Policy.Session session, double[] deviations) {
if (isPerCollectiontag()) {
if(nodeSetPresent) {
// nodeset-specific evaluation is not wired in yet; the clause falls
// through to the standard per-node/per-shard tests below.
}
return tag.varType == Type.NODE ||
(tag.varType.meta.isNodeSpecificVal() && replica.computedType == null) ?
testPerNode(session, deviations) :
@ -618,7 +653,7 @@ public class Clause implements MapWriter, Comparable<Clause> {
* @param isRuleVal is this provided in the rule
* @return actual validated value
*/
public static Object validate(String name, Object val, boolean isRuleVal) {
if (val == null) return null;
Type info = VariableBase.getTagType(name);
if (info == null) throw new RuntimeException("Unknown type :" + name);

View File

@ -557,7 +557,7 @@ public class Policy implements MapWriter {
if (!withCollMap.isEmpty()) {
Clause withCollClause = new Clause((Map<String,Object>)Utils.fromJSONString("{withCollection:'*' , node: '#ANY'}") ,
new Condition(NODE.tagName, "#ANY", Operand.EQUAL, null, null),
new Condition(WITH_COLLECTION.tagName,"*" , Operand.EQUAL, null, null), true
new Condition(WITH_COLLECTION.tagName,"*" , Operand.EQUAL, null, null), true, false
);
expandedClauses.add(withCollClause);
}

View File

@ -43,7 +43,7 @@ public class Lang {
.withFunctionName("jdbc", JDBCStream.class)
.withFunctionName("topic", TopicStream.class)
.withFunctionName("commit", CommitStream.class)
.withFunctionName("random", RandomStream.class)
.withFunctionName("random", RandomFacadeStream.class)
.withFunctionName("knnSearch", KnnStream.class)
// decorator streams

View File

@ -0,0 +1,483 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.io.stream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import java.util.Random;
import java.util.LinkedList;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.comp.StreamComparator;
import org.apache.solr.client.solrj.io.stream.expr.Explanation;
import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
import org.apache.solr.client.solrj.io.stream.expr.Expressible;
import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue;
import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import static org.apache.solr.common.params.CommonParams.DISTRIB;
import static org.apache.solr.common.params.CommonParams.ROWS;
import static org.apache.solr.common.params.CommonParams.SORT;
/**
* Connects to Zookeeper to pick replicas from a specific collection to send the query to.
* Under the covers the SolrStream instances send the query to the replicas.
* SolrStreams are opened using a thread pool, but a single thread is used
* to iterate and merge Tuples from each SolrStream.
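* <p>
* Illustrative usage (a sketch; the host, collection, params, and cache are assumptions):
* <pre>
*   ModifiableSolrParams params = new ModifiableSolrParams();
*   params.set("q", "*:*");
*   params.set("fl", "id");
*   params.set("rows", "10000");
*   StreamContext context = new StreamContext();
*   context.setSolrClientCache(solrClientCache); // shared cache, as with other cloud streams
*   DeepRandomStream stream = new DeepRandomStream("localhost:9983", "collection1", params);
*   stream.setStreamContext(context);
*   try {
*     stream.open();
*     for (Tuple t = stream.read(); !t.EOF; t = stream.read()) {
*       // consume tuple
*     }
*   } finally {
*     stream.close();
*   }
* </pre>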
* @since 5.1.0
**/
public class DeepRandomStream extends TupleStream implements Expressible {
private static final long serialVersionUID = 1;
protected String zkHost;
protected String collection;
protected ModifiableSolrParams params;
protected Map<String, String> fieldMappings;
protected StreamComparator comp;
private boolean trace;
protected transient Map<String, Tuple> eofTuples;
protected transient CloudSolrClient cloudSolrClient;
protected transient List<TupleStream> solrStreams;
protected transient LinkedList<TupleWrapper> tuples;
protected transient StreamContext streamContext;
public DeepRandomStream() {
//Used by the RandomFacadeStream
}
/**
* @param zkHost Zookeeper ensemble connection string
* @param collectionName Name of the collection to operate on
* @param params Map&lt;String, String[]&gt; of parameter/value pairs
* @throws IOException Something went wrong
*/
public DeepRandomStream(String zkHost, String collectionName, SolrParams params) throws IOException {
init(collectionName, zkHost, params);
}
public DeepRandomStream(StreamExpression expression, StreamFactory factory) throws IOException{
// grab all parameters out
String collectionName = factory.getValueOperand(expression, 0);
List<StreamExpressionNamedParameter> namedParams = factory.getNamedOperands(expression);
StreamExpressionNamedParameter aliasExpression = factory.getNamedOperand(expression, "aliases");
StreamExpressionNamedParameter zkHostExpression = factory.getNamedOperand(expression, "zkHost");
// Collection Name
if(null == collectionName){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - collectionName expected as first operand",expression));
}
// Validate there are no unknown parameters - zkHost and alias are namedParameter so we don't need to count it twice
if(expression.getParameters().size() != 1 + namedParams.size()){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - unknown operands found",expression));
}
// Named parameters - passed directly to solr as solrparams
if(0 == namedParams.size()){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - at least one named parameter expected. eg. 'q=*:*'",expression));
}
ModifiableSolrParams mParams = new ModifiableSolrParams();
for(StreamExpressionNamedParameter namedParam : namedParams){
if(!namedParam.getName().equals("zkHost") && !namedParam.getName().equals("aliases")){
mParams.add(namedParam.getName(), namedParam.getParameter().toString().trim());
}
}
// Aliases, optional, if provided then need to split
if(null != aliasExpression && aliasExpression.getParameter() instanceof StreamExpressionValue){
fieldMappings = new HashMap<>();
for(String mapping : ((StreamExpressionValue)aliasExpression.getParameter()).getValue().split(",")){
String[] parts = mapping.trim().split("=");
if(2 == parts.length){
fieldMappings.put(parts[0], parts[1]);
}
else{
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - alias expected of the format origName=newName",expression));
}
}
}
// zkHost, optional - if not provided then will look into factory list to get
String zkHost = null;
if(null == zkHostExpression){
zkHost = factory.getCollectionZkHost(collectionName);
if(zkHost == null) {
zkHost = factory.getDefaultZkHost();
}
}
else if(zkHostExpression.getParameter() instanceof StreamExpressionValue){
zkHost = ((StreamExpressionValue)zkHostExpression.getParameter()).getValue();
}
/*
if(null == zkHost){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - zkHost not found for collection '%s'",expression,collectionName));
}
*/
// We've got all the required items
init(collectionName, zkHost, mParams);
}
@Override
public StreamExpression toExpression(StreamFactory factory) throws IOException {
// function name
StreamExpression expression = new StreamExpression("random");
// collection
if(collection.indexOf(',') > -1) {
expression.addParameter("\""+collection+"\"");
} else {
expression.addParameter(collection);
}
for (Entry<String, String[]> param : params.getMap().entrySet()) {
for (String val : param.getValue()) {
// SOLR-8409: Escaping the " is a special case.
// Do note that in any other BASE streams with parameters where a " might come into play
// that this same replacement needs to take place.
expression.addParameter(new StreamExpressionNamedParameter(param.getKey(),
val.replace("\"", "\\\"")));
}
}
// zkHost
expression.addParameter(new StreamExpressionNamedParameter("zkHost", zkHost));
// aliases
if(null != fieldMappings && 0 != fieldMappings.size()){
StringBuilder sb = new StringBuilder();
for(Entry<String,String> mapping : fieldMappings.entrySet()){
if(sb.length() > 0){ sb.append(","); }
sb.append(mapping.getKey());
sb.append("=");
sb.append(mapping.getValue());
}
expression.addParameter(new StreamExpressionNamedParameter("aliases", sb.toString()));
}
return expression;
}
@Override
public Explanation toExplanation(StreamFactory factory) throws IOException {
StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString());
explanation.setFunctionName("random");
explanation.setImplementingClass(this.getClass().getName());
explanation.setExpressionType(ExpressionType.STREAM_SOURCE);
explanation.setExpression(toExpression(factory).toString());
// child is a datastore so add it at this point
StreamExplanation child = new StreamExplanation(getStreamNodeId() + "-datastore");
child.setFunctionName(String.format(Locale.ROOT, "solr (%s)", collection));
child.setImplementingClass("Solr/Lucene");
child.setExpressionType(ExpressionType.DATASTORE);
if(null != params){
ModifiableSolrParams mParams = new ModifiableSolrParams(params);
child.setExpression(mParams.getMap().entrySet().stream().map(e -> String.format(Locale.ROOT, "%s=%s", e.getKey(), e.getValue())).collect(Collectors.joining(",")));
}
explanation.addChild(child);
return explanation;
}
void init(String collectionName, String zkHost, SolrParams params) throws IOException {
this.zkHost = zkHost;
this.collection = collectionName;
this.params = new ModifiableSolrParams(params);
if (params.get("q") == null) {
throw new IOException("q param expected for search function");
}
if (params.getParams("fl") == null) {
throw new IOException("fl param expected for search function");
}
}
public void setFieldMappings(Map<String, String> fieldMappings) {
this.fieldMappings = fieldMappings;
}
public void setTrace(boolean trace) {
this.trace = trace;
}
public void setStreamContext(StreamContext context) {
this.streamContext = context;
}
public void open() throws IOException {
this.tuples = new LinkedList<>();
this.solrStreams = new ArrayList<>();
this.eofTuples = Collections.synchronizedMap(new HashMap<>());
constructStreams();
openStreams();
}
public List<TupleStream> children() {
return solrStreams;
}
public static Slice[] getSlices(String collectionName, ZkStateReader zkStateReader, boolean checkAlias) throws IOException {
ClusterState clusterState = zkStateReader.getClusterState();
Map<String, DocCollection> collectionsMap = clusterState.getCollectionsMap();
//TODO we should probably split collection by comma to query more than one
// which is something already supported in other parts of Solr
// check for alias or collection
List<String> allCollections = new ArrayList<>();
String[] collectionNames = collectionName.split(",");
for(String col : collectionNames) {
List<String> collections = checkAlias
? zkStateReader.getAliases().resolveAliases(col) // if not an alias, returns collectionName
: Collections.singletonList(col);
allCollections.addAll(collections);
}
// Lookup all actives slices for these collections
List<Slice> slices = allCollections.stream()
.map(collectionsMap::get)
.filter(Objects::nonNull)
.flatMap(docCol -> Arrays.stream(docCol.getActiveSlicesArr()))
.collect(Collectors.toList());
if (!slices.isEmpty()) {
return slices.toArray(new Slice[slices.size()]);
}
// Check collection case insensitive
for(String collectionMapKey : collectionsMap.keySet()) {
if(collectionMapKey.equalsIgnoreCase(collectionName)) {
return collectionsMap.get(collectionMapKey).getActiveSlicesArr();
}
}
throw new IOException("Slices not found for " + collectionName);
}
protected void constructStreams() throws IOException {
try {
List<String> shardUrls = getShards(this.zkHost, this.collection, this.streamContext);
ModifiableSolrParams mParams = new ModifiableSolrParams(params);
mParams = adjustParams(mParams);
mParams.set(DISTRIB, "false"); // We are the aggregator.
String rows = mParams.get(ROWS);
if (rows == null) {
throw new IOException("rows param expected for the deep random stream");
}
int r = Integer.parseInt(rows);
// Split the requested sample size evenly across shards; the remainder
// (e.g. rows=100 over 3 shards gives 33 each, remainder 1) goes to the first stream below.
int newRows = r/shardUrls.size();
mParams.set(ROWS, Integer.toString(newRows));
int seed = new Random().nextInt();
mParams.set(SORT, "random_"+Integer.toString(seed)+" asc");
int remainder = r - newRows*shardUrls.size();
for(String shardUrl : shardUrls) {
ModifiableSolrParams useParams = null;
if(solrStreams.size() == 0 && remainder > 0) {
useParams = new ModifiableSolrParams(mParams);
useParams.set(ROWS, newRows+remainder);
} else {
useParams = mParams;
}
SolrStream solrStream = new SolrStream(shardUrl, useParams);
if(streamContext != null) {
solrStream.setStreamContext(streamContext);
}
solrStream.setFieldMappings(this.fieldMappings);
solrStreams.add(solrStream);
}
} catch (Exception e) {
throw new IOException(e);
}
}
private void openStreams() throws IOException {
ExecutorService service = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("DeepRandomStream"));
try {
List<Future<TupleWrapper>> futures = new ArrayList<>();
for (TupleStream solrStream : solrStreams) {
StreamOpener so = new StreamOpener((SolrStream) solrStream, comp);
Future<TupleWrapper> future = service.submit(so);
futures.add(future);
}
try {
for (Future<TupleWrapper> f : futures) {
TupleWrapper w = f.get();
if (w != null) {
tuples.add(w);
}
}
} catch (Exception e) {
throw new IOException(e);
}
} finally {
service.shutdown();
}
}
public void close() throws IOException {
if(solrStreams != null) {
for (TupleStream solrStream : solrStreams) {
solrStream.close();
}
}
}
/** Return the stream sort, i.e., the order in which records are returned */
public StreamComparator getStreamSort(){
return comp;
}
public Tuple read() throws IOException {
return _read();
}
protected Tuple _read() throws IOException {
if(tuples.size() > 0) {
TupleWrapper tw = tuples.removeFirst();
Tuple t = tw.getTuple();
if (trace) {
t.put("_COLLECTION_", this.collection);
}
if(tw.next()) {
tuples.addLast(tw);
}
return t;
} else {
Map<String, Object> m = new HashMap<>();
if(trace) {
m.put("_COLLECTION_", this.collection);
}
m.put("EOF", true);
return new Tuple(m);
}
}
protected class TupleWrapper implements Comparable<TupleWrapper> {
private Tuple tuple;
private SolrStream stream;
private StreamComparator comp;
public TupleWrapper(SolrStream stream, StreamComparator comp) {
this.stream = stream;
this.comp = comp;
}
public int compareTo(TupleWrapper w) {
if(this == w) {
return 0;
}
int i = comp.compare(tuple, w.tuple);
if(i == 0) {
return 1;
} else {
return i;
}
}
public boolean equals(Object o) {
return this == o;
}
public Tuple getTuple() {
return tuple;
}
public boolean next() throws IOException {
this.tuple = stream.read();
if(tuple.EOF) {
eofTuples.put(stream.getBaseUrl(), tuple);
}
return !tuple.EOF;
}
}
protected class StreamOpener implements Callable<TupleWrapper> {
private SolrStream stream;
private StreamComparator comp;
public StreamOpener(SolrStream stream, StreamComparator comp) {
this.stream = stream;
this.comp = comp;
}
public TupleWrapper call() throws Exception {
stream.open();
TupleWrapper wrapper = new TupleWrapper(stream, comp);
if(wrapper.next()) {
return wrapper;
} else {
return null;
}
}
}
protected ModifiableSolrParams adjustParams(ModifiableSolrParams params) {
return params;
}
}

View File

@ -0,0 +1,148 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.io.stream;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.comp.StreamComparator;
import org.apache.solr.client.solrj.io.stream.expr.Explanation;
import org.apache.solr.client.solrj.io.stream.expr.Expressible;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue;
import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import static org.apache.solr.common.params.CommonParams.ROWS;
public class RandomFacadeStream extends TupleStream implements Expressible {
private TupleStream innerStream;
public RandomFacadeStream(StreamExpression expression, StreamFactory factory) throws IOException{
// grab all parameters out
String collectionName = factory.getValueOperand(expression, 0);
List<StreamExpressionNamedParameter> namedParams = factory.getNamedOperands(expression);
StreamExpressionNamedParameter zkHostExpression = factory.getNamedOperand(expression, "zkHost");
// Collection Name
if(null == collectionName){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - collectionName expected as first operand",expression));
}
// Named parameters - passed directly to solr as solrparams
if(0 == namedParams.size()){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - at least one named parameter expected. eg. 'q=*:*'",expression));
}
// pull out known named params
Map<String, String> params = new HashMap<>();
for(StreamExpressionNamedParameter namedParam : namedParams){
if(!namedParam.getName().equals("zkHost") && !namedParam.getName().equals("buckets") && !namedParam.getName().equals("bucketSorts") && !namedParam.getName().equals("limit")){
params.put(namedParam.getName(), namedParam.getParameter().toString().trim());
}
}
// zkHost, optional - if not provided then will look into factory list to get
String zkHost = null;
if(null == zkHostExpression){
zkHost = factory.getCollectionZkHost(collectionName);
if(zkHost == null) {
zkHost = factory.getDefaultZkHost();
}
}
else if(zkHostExpression.getParameter() instanceof StreamExpressionValue){
zkHost = ((StreamExpressionValue)zkHostExpression.getParameter()).getValue();
}
if(null == zkHost){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - zkHost not found for collection '%s'",expression,collectionName));
}
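// Route large samples to DeepRandomStream; smaller requests stay on the lighter
// single-request RandomStream.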
if(params.get(ROWS) != null) {
int rows = Integer.parseInt(params.get(ROWS));
if(rows >= 5000) {
DeepRandomStream deepRandomStream = new DeepRandomStream();
deepRandomStream.init(collectionName, zkHost, toSolrParams(params));
this.innerStream = deepRandomStream;
} else {
RandomStream randomStream = new RandomStream();
randomStream.init(zkHost, collectionName, params);
this.innerStream = randomStream;
}
} else {
RandomStream randomStream = new RandomStream();
randomStream.init(zkHost, collectionName, params);
this.innerStream = randomStream;
}
}
private SolrParams toSolrParams(Map<String, String> props) {
ModifiableSolrParams sp = new ModifiableSolrParams();
for(String key : props.keySet()) {
sp.add(key, props.get(key));
}
return sp;
}
@Override
public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
return ((Expressible)innerStream).toExpression(factory);
}
@Override
public Explanation toExplanation(StreamFactory factory) throws IOException {
return innerStream.toExplanation(factory);
}
public void setStreamContext(StreamContext context) {
this.innerStream.setStreamContext(context);
}
public List<TupleStream> children() {
return innerStream.children();
}
public void open() throws IOException {
innerStream.open();
}
public void close() throws IOException {
innerStream.close();
}
public Tuple read() throws IOException {
return innerStream.read();
}
public int getCost() {
return 0;
}
@Override
public StreamComparator getStreamSort() {
return null;
}
}
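For orientation, a minimal usage sketch (not part of this patch; the collection name and zkHost are placeholders) showing the facade registered under the same random function name, mirroring the test change below:
import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.RandomFacadeStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.client.solrj.io.stream.TupleStream;
import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
public class RandomFacadeExample {
  public static void main(String[] args) throws Exception {
    StreamFactory factory = new StreamFactory()
        .withCollectionZkHost("collection1", "localhost:9983") // placeholder zkHost
        .withFunctionName("random", RandomFacadeStream.class);
    // rows >= 5000 routes to DeepRandomStream internally; smaller values use RandomStream.
    TupleStream stream = factory.constructStream(
        "random(collection1, q=\"*:*\", rows=\"10000\", fl=\"id\")");
    StreamContext context = new StreamContext();
    SolrClientCache cache = new SolrClientCache();
    context.setSolrClientCache(cache);
    stream.setStreamContext(context);
    try {
      stream.open();
      for (Tuple tuple = stream.read(); !tuple.EOF; tuple = stream.read()) {
        System.out.println(tuple.get("id"));
      }
    } finally {
      stream.close();
      cache.close();
    }
  }
}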

View File

@ -66,6 +66,10 @@ public class RandomStream extends TupleStream implements Expressible {
protected transient CloudSolrClient cloudSolrClient;
private Iterator<SolrDocument> documentIterator;
public RandomStream() {
// Used by the RandomFacade
}
public RandomStream(String zkHost,
String collection,
Map<String, String> props) throws IOException {
@ -116,7 +120,7 @@ public class RandomStream extends TupleStream implements Expressible {
init(zkHost, collectionName, params);
}
private void init(String zkHost, String collection, Map<String, String> props) throws IOException {
void init(String zkHost, String collection, Map<String, String> props) throws IOException {
this.zkHost = zkHost;
this.props = props;
this.collection = collection;

View File

@ -16,6 +16,12 @@
*/
package org.apache.solr.common.cloud;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
@ -32,13 +38,6 @@ import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;
import org.apache.commons.io.FileUtils;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.common.SolrException;
@ -274,29 +273,8 @@ public class SolrZkClient implements Closeable {
* {@link #getData(String, org.apache.zookeeper.Watcher, org.apache.zookeeper.data.Stat, boolean)}.
*/
public Watcher wrapWatcher(final Watcher watcher) {
if (watcher == null || watcher instanceof SolrZkWatcher) return watcher;
return new SolrZkWatcher() {
@Override
public void process(final WatchedEvent event) {
log.debug("Submitting job to respond to event {}", event);
try {
if (watcher instanceof ConnectionManager) {
zkConnManagerCallbackExecutor.submit(() -> watcher.process(event));
} else {
zkCallbackExecutor.submit(() -> watcher.process(event));
}
} catch (RejectedExecutionException e) {
// If not a graceful shutdown
if (!isClosed()) {
throw e;
}
}
}
};
}
private interface SolrZkWatcher extends Watcher {
if (watcher == null || watcher instanceof ProcessWatchWithExecutor) return watcher;
return new ProcessWatchWithExecutor(watcher);
}
/**
@ -834,4 +812,56 @@ public class SolrZkClient implements Closeable {
public void downloadFromZK(String zkPath, Path dir) throws IOException {
ZkMaintenanceUtils.downloadFromZK(this, zkPath, dir);
}
/**
* Watcher wrapper that ensures that heavy implementations of process do not interfere with our ability
* to react to other watches, but also ensures that two wrappers containing equal watches are considered
* equal (and thus we won't accumulate multiple wrappers of the same watch).
*/
private final class ProcessWatchWithExecutor implements Watcher { // see below for why final.
private final Watcher watcher;
ProcessWatchWithExecutor(Watcher watcher) {
if (watcher == null) {
throw new IllegalArgumentException("Watcher must not be null");
}
this.watcher = watcher;
}
@Override
public void process(final WatchedEvent event) {
log.debug("Submitting job to respond to event {}", event);
try {
if (watcher instanceof ConnectionManager) {
zkConnManagerCallbackExecutor.submit(() -> watcher.process(event));
} else {
zkCallbackExecutor.submit(() -> watcher.process(event));
}
} catch (RejectedExecutionException e) {
// If not a graceful shutdown
if (!isClosed()) {
throw e;
}
}
}
// These overrides of hashCode/equals ensure that we don't store the same exact watch
// multiple times in org.apache.zookeeper.ZooKeeper.ZKWatchManager.dataWatches
// (a Map<String, Set<Watcher>>). This class is marked final to avoid oddball cases
// with sub-classes: if you need different behavior, make a new class, and make sure
// you account for the case where two different sub-classes, with different behavior
// for process(WatchedEvent), have been created with the same watcher object.
@Override
public int hashCode() {
return watcher.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof ProcessWatchWithExecutor) {
return this.watcher.equals(((ProcessWatchWithExecutor) obj).watcher);
}
return false;
}
}
}
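A quick sketch of the invariant the new wrapper guarantees (illustrative only; zkClient stands for any live SolrZkClient):
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.zookeeper.Watcher;
class WatchDeduplicationSketch {
  static void demonstrate(SolrZkClient zkClient) {
    Watcher watcher = event -> {};                  // Watcher is a single-method interface
    Watcher a = zkClient.wrapWatcher(watcher);
    Watcher b = zkClient.wrapWatcher(watcher);
    // Equal wrappers: ZooKeeper's per-path watch sets will keep only one of them.
    assert a.equals(b) && a.hashCode() == b.hashCode();
    // Re-wrapping is a no-op, so wrappers never nest.
    assert zkClient.wrapWatcher(a) == a;
  }
}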

View File

@ -86,8 +86,12 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.AD
import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
public class TestPolicy extends SolrTestCaseJ4 {
boolean useNodeset;
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public TestPolicy(){
useNodeset = true;
}
static Suggester createSuggester(SolrCloudManager cloudManager, Map jsonObj, Suggester seed) throws IOException, InterruptedException {
Policy.Session session = null;
if (seed != null) session = seed.session;
@ -696,6 +700,20 @@ public class TestPolicy extends SolrTestCaseJ4 {
expectThrows(IllegalArgumentException.class,
() -> Clause.create("{cores: '>14%' , node:'#ANY'}"));
clause = Clause.create("{replica:1, nodeset : {sysprop.zone : east}}");
assertEquals(Variable.Type.STRING, clause.tag.varType);
clause = Clause.create("{replica:1, nodeset : [node1, node2, node3]}");
assertEquals(Variable.Type.NODE, clause.tag.varType);
assertEquals(Operand.IN, clause.tag.op);
expectThrows(IllegalArgumentException.class,
() -> Clause.create("{replica:1, node: n1, nodeset : {sysprop.zone : east}}"));
IllegalArgumentException exp = expectThrows(IllegalArgumentException.class,
() -> Clause.create("{replica:1, nodeset : {sysprop.zone : east , port: 8983 }}"));
assertTrue(exp.getMessage().contains("nodeset must only have one and only one key"));
clause = Clause.create("{'replica': '#ALL', 'nodeset': {'freedisk': '>700'}, 'strict': false}");
assertEquals(Operand.GREATER_THAN , clause.tag.op);
}
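To make the syntax change concrete, a small sketch using the same Clause.create API the assertions above exercise (values here are illustrative):
// Old style: the node-filtering tag sits directly at the top level of the clause.
Clause oldStyle = Clause.create("{replica: 1, 'sysprop.zone': east}");
// New style (SOLR-13504): the same filter wrapped in an explicit nodeset,
// which must contain exactly one key.
Clause newStyle = Clause.create("{replica: 1, nodeset: {'sysprop.zone': east}}");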
@ -976,6 +994,19 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY'}," +
" ]" +
"}");
if(useNodeset) {
policies = (Map) Utils.fromJSONString("{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 50}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, nodeset:{'nodeRole': 'overseer'}}" +
" { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY'}," +
" ]" +
"}");
}
AutoScalingConfig config = new AutoScalingConfig(policies);
Policy policy = config.getPolicy();
Policy.Session session = policy.createSession(provider);
@ -1011,6 +1042,19 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica': '<3', 'shard': '#EACH', 'node': '#ANY'}," +
" ]" +
"}");
if(useNodeset){
policies = (Map) Utils.fromJSONString("{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 50}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, nodeset: {'nodeRole': 'overseer'}}" +
" { 'replica': '<3', 'shard': '#EACH', 'node': '#ANY'}," +
" ]" +
"}");
}
config = new AutoScalingConfig(policies);
policy = config.getPolicy();
session = policy.createSession(provider);
@ -1055,6 +1099,20 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica': '<3', 'shard': 'shard2', 'node': '#ANY'}," +
" ]" +
"}");
if(useNodeset){
policies = (Map) Utils.fromJSONString("{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 50}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, nodeset:{'nodeRole': 'overseer'}}" +
" { 'replica': '<2', 'shard': 'shard1', 'node': '#ANY'}," +
" { 'replica': '<3', 'shard': 'shard2', 'node': '#ANY'}," +
" ]" +
"}");
}
config = new AutoScalingConfig(policies);
policy = config.getPolicy();
session = policy.createSession(provider);
@ -1167,6 +1225,21 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica': 0, 'shard': '#EACH', sysprop.fs : '!slowdisk' , type : PULL }" +
" ]" +
"}");
if(useNodeset){
policies = (Map) Utils.fromJSONString("{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 50}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, nodeset : {'nodeRole': 'overseer'}}" +
" { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY'}," +
" { 'replica': 0, 'shard': '#EACH', nodeset : { sysprop.fs : '!ssd'}, type : TLOG }" +
" { 'replica': 0, 'shard': '#EACH', nodeset : {sysprop.fs : '!slowdisk'} , type : PULL }" +
" ]" +
"}");
}
Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
"node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4, sysprop.fs: slowdisk}," +
"node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3, sysprop.fs: unknown }," +
@ -1276,6 +1349,23 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica': 0, 'shard': '#EACH', sysprop.fs : '!slowdisk' , type : PULL }" +
" ]" +
"}");
if(useNodeset){
policies = (Map) Utils.fromJSONString("{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 1}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, nodeset : {'nodeRole': 'overseer'}}" +
" { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY', 'collection':'newColl'}," +
" { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY', 'collection':'newColl2', type : PULL}," +
" { 'replica': '<3', 'shard': '#EACH', 'node': '#ANY', 'collection':'newColl2'}," +
" { 'replica': 0, 'shard': '#EACH', nodeset:{ sysprop.fs : '!ssd'}, type : TLOG }" +
" { 'replica': 0, 'shard': '#EACH', nodeset : {sysprop.fs : '!slowdisk'} , type : PULL }" +
" ]" +
"}");
}
Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
"node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4, sysprop.fs: slowdisk}," +
"node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3, sysprop.fs: unknown}," +
@ -1400,6 +1490,26 @@ public class TestPolicy extends SolrTestCaseJ4 {
" ]" +
" }" +
"}");
if(useNodeset){
map = (Map) Utils.fromJSONString("{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 50}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, nodeset: {'nodeRole': 'overseer'}}," +
" { 'replica': '<2', 'shard': '#EACH', 'node': '#ANY'}" +
" ]," +
" 'policies': {" +
" 'policy1': [" +
" { 'replica': '1', nodeset:{ 'sysprop.fs': 'ssd'}, 'shard': '#EACH'}," +
" { 'replica': '<2', 'shard': '#ANY', 'node': '#ANY'}," +
" { 'replica': '<2', 'shard': '#EACH',nodeset:{ 'sysprop.rack': 'rack1'}}" +
" ]" +
" }" +
"}");
}
Policy policy = new Policy(map);
List<Clause> clauses = Policy.mergePolicies("mycoll", policy.getPolicies().get("policy1"), policy.getClusterPolicy());
Collections.sort(clauses);
@ -1407,8 +1517,8 @@ public class TestPolicy extends SolrTestCaseJ4 {
assertEquals("1", String.valueOf(clauses.get(0).original.get("replica")));
assertEquals("0", String.valueOf(clauses.get(1).original.get("replica")));
assertEquals("#ANY", clauses.get(3).original.get("shard"));
assertEquals("rack1", clauses.get(2).original.get("sysprop.rack"));
assertEquals("overseer", clauses.get(1).original.get("nodeRole"));
assertEquals("rack1", clauses.get(2).tag.val);
assertEquals("overseer",clauses.get(1).tag.val);
}
public void testConditionsSort() {
@ -1419,6 +1529,16 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica':'<2', 'node':'#ANY', 'shard':'#EACH'}," +
" { 'replica':1, 'sysprop.rack':'rack1'}]" +
" }";
if(useNodeset){
rules = "{" +
" 'cluster-policy':[" +
" { 'nodeRole':'overseer', replica: 0, 'strict':false}," +
" { 'replica':'<1', 'node':'node3', 'shard':'#EACH'}," +
" { 'replica':'<2', 'node':'#ANY', 'shard':'#EACH'}," +
" { 'replica':1, nodeset: {'sysprop.rack':'rack1'}}]" +
" }";
}
Policy p = new Policy((Map<String, Object>) Utils.fromJSONString(rules));
List<Clause> clauses = new ArrayList<>(p.getClusterPolicy());
Collections.sort(clauses);
@ -1436,6 +1556,17 @@ public class TestPolicy extends SolrTestCaseJ4 {
"{minimize:cores , precision:2}," +
"{maximize:freedisk, precision:50}, " +
"{minimize:heapUsage, precision:1000}]}";
if(useNodeset){
rules = "{" +
"cluster-policy:[" +
"{nodeset:{nodeRole:'overseer'},replica : 0 , strict:false}," +
"{replica:'<1',node:node3}," +
"{replica:'<2',node:'#ANY', shard:'#EACH'}]," +
" cluster-preferences:[" +
"{minimize:cores , precision:2}," +
"{maximize:freedisk, precision:50}, " +
"{minimize:heapUsage, precision:1000}]}";
}
Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
"node1:{cores:12, freedisk: 334, heapUsage:10480}," +
@ -1624,6 +1755,18 @@ public class TestPolicy extends SolrTestCaseJ4 {
" 'cluster-preferences':[" +
" {'minimize':'cores', 'precision':3}," +
" {'maximize':'freedisk','precision':100}]}";
if(useNodeset){
autoscaleJson = "{" +
" 'cluster-policy':[" +
" {'cores':'<10','node':'#ANY'}," +
" {'replica':'<3','shard':'#EACH','node':'#ANY'}," +
" { 'replica': 2, nodeset: {'sysprop.fs': 'ssd'}, 'shard': '#EACH'}," +//greedy condition
" {nodeset:{'nodeRole':'overseer'},'replica':'0'}]," +
" 'cluster-preferences':[" +
" {'minimize':'cores', 'precision':3}," +
" {'maximize':'freedisk','precision':100}]}";
}
Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
"node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4}," +
"node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3}," +
@ -1667,6 +1810,17 @@ public class TestPolicy extends SolrTestCaseJ4 {
" {'minimize':'cores', 'precision':3}," +
" {'maximize':'freedisk','precision':100}]}";
if(useNodeset){
autoscaleJson = "{" +
" 'cluster-policy':[" +
" {'cores':'<10','node':'#ANY'}," +
" {'replica':'<3','shard':'#EACH','node':'#ANY'}," +
" {nodeset: {'nodeRole':'overseer'},'replica':'0'}]," +
" 'cluster-preferences':[" +
" {'minimize':'cores', 'precision':3}," +
" {'maximize':'freedisk','precision':100}]}";
}
Map replicaInfoMap = (Map) Utils.fromJSONString("{ '127.0.0.1:60099_solr':{}," +
" '127.0.0.1:60089_solr':{'compute_plan_action_test':{'shard1':[" +
@ -1741,6 +1895,31 @@ public class TestPolicy extends SolrTestCaseJ4 {
"}" +
"}";
if(useNodeset){
rules = "{" +
"'cluster-preferences':[" +
"{'minimize':'cores','precision':2}," +
"{'maximize':'freedisk','precision':50}," +
"{'minimize':'heapUsage','precision':1000}" +
"]," +
"'cluster-policy':[" +
"{replica:0, nodeset:{'nodeRole':'overseer'},'strict':false}," +
"{'replica':'<1','node':'node3'}," +
"{'replica':'<2','node':'#ANY','shard':'#EACH'}" +
"]," +
"'policies':{" +
"'p1':[" +
"{replica:0, nodeset:{'nodeRole':'overseer'},'strict':false}," +
"{'replica':'<1','node':'node3'}," +
"{'replica':'<2','node':'#ANY','shard':'#EACH'}," +
"{'replica':'<3','shard':'#EACH', nodeset : { 'sysprop.rack':'#EACH'}}" +
"]" +
"}" +
"}";
}
Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
"node1:{cores:12, freedisk: 334, heapUsage:10480, rack: rack4}," +
"node2:{cores:4, freedisk: 749, heapUsage:6873, rack: rack3}," +
@ -1928,6 +2107,26 @@ public class TestPolicy extends SolrTestCaseJ4 {
" ]" +
" }" +
"}";
if(useNodeset){
autoScaleJson = "{" +
" cluster-preferences: [" +
" { maximize : freedisk , precision: 50}," +
" { minimize : cores, precision: 2}" +
" ]," +
" cluster-policy: [" +
" { replica : '0' , nodeset: {nodeRole: overseer}}," +
" { replica: '<2', shard: '#ANY', node: '#ANY'" +
" }" +
" ]," +
" policies: {" +
" policy1: [" +
" { replica: '<2', shard: '#EACH', node: '#ANY'}," +
" { replica: '<2', shard: '#EACH', nodeset:{ sysprop.rack: rack1}}" +
" ]" +
" }" +
"}";
}
Map<String, Map> nodeValues = (Map<String, Map>) Utils.fromJSONString("{" +
@ -2003,6 +2202,15 @@ public class TestPolicy extends SolrTestCaseJ4 {
" cluster-policy:[{cores:'<10',node:'#ANY'}," +
" {replica:'<2', shard:'#EACH',node:'#ANY'}," +
" { nodeRole:overseer,replica:0}]}";
if(useNodeset){
autoScalingjson = "cluster-preferences:[" +
" {minimize : cores}," +
" {'maximize':freedisk , precision:100}], " +
" cluster-policy:[{cores:'<10',node:'#ANY'}," +
" {replica:'<2', shard:'#EACH',node:'#ANY'}," +
" {nodeset:{ nodeRole:overseer},replica:0}]}";
}
Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
Policy.Session session = policy.createSession(cloudManagerWithData((Map) loadFromResource("testComputePlanAfterNodeAdded.json")));
Suggester suggester = session.getSuggester(CollectionParams.CollectionAction.MOVEREPLICA)
@ -2018,6 +2226,14 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { replica :'<2', node:'#ANY'}," +
" { nodeRole : overseer, replica :0}]," +
" cluster-preferences :[{ minimize : cores }]}";
if(useNodeset){
autoScalingjson = " { cluster-policy:[" +
" { cores :'<10', node :'#ANY'}," +
" { replica :'<2', node:'#ANY'}," +
" { nodeset:{nodeRole : overseer}, replica :0}]," +
" cluster-preferences :[{ minimize : cores }]}";
}
List<Suggester.SuggestionInfo> l = PolicyHelper.getSuggestions(new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson)),
cloudManagerWithData((Map) loadFromResource("testReplicaCountSuggestions.json")));
assertFalse(l.isEmpty());
@ -2123,6 +2339,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
public void testFreeDiskDeviation() {
Map map = (Map) loadFromResource("testFreeDiskDeviation.json");
AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) map.get("config"));
if(useNodeset){
cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString("{" +
" 'cluster-policy': [{'replica':'<2', 'shard':'#EACH', 'node':'#ANY'}," +
" {'replica': '#ALL', 'nodeset': {'freedisk': '>700'}, 'strict': false}]" +
" }"));
}
SolrCloudManager scm = cloudManagerWithData(map);
Suggester suggester = cfg.getPolicy()
.createSession(scm)
@ -2159,6 +2381,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { replica :'0', freedisk:'<1000'}," +
" { nodeRole : overseer, replica :0}]," +
" cluster-preferences :[{ minimize : cores, precision : 2 }]}";
if(useNodeset){
autoScalingjson = " { cluster-policy:[" +
" { replica :'0', nodeset:{ freedisk:'<1000'}}," +
" { replica :0, nodeset : {nodeRole : overseer}}]," +
" cluster-preferences :[{ minimize : cores, precision : 2 }]}";
}
AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testFreeDiskSuggestions.json"))).getViolations();
assertEquals(1, violations.size());
@ -2185,6 +2413,12 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { replica :'#ALL', freedisk:'>1000'}," +
" { nodeRole : overseer, replica :0}]," +
" cluster-preferences :[{ minimize : cores, precision : 2 }]}";
if(useNodeset){
autoScalingjson = " { cluster-policy:[" +
" { replica :'#ALL', nodeset:{ freedisk:'>1000'}}," +
" { replica :0 , nodeset : {nodeRole : overseer}}]," +
" cluster-preferences :[{ minimize : cores, precision : 2 }]}";
}
cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testFreeDiskSuggestions.json"))).getViolations();
assertEquals(1, violations.size());
@ -2237,6 +2471,17 @@ public class TestPolicy extends SolrTestCaseJ4 {
" { 'replica': '1', shard:'#EACH', sysprop.fs : 'ssd'}" +
" ]" +
"}";
if(useNodeset){
autoScalingjson = "{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 3}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': '1', shard:'#EACH', nodeset:{ sysprop.fs : 'ssd'}}" +
" ]" +
"}";
}
AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
@ -2260,6 +2505,18 @@ public class TestPolicy extends SolrTestCaseJ4 {
" ]" +
"}";
if(useNodeset){
autoScalingjson = "{" +
" 'cluster-preferences': [" +
" { 'maximize': 'freedisk', 'precision': 50}," +
" { 'minimize': 'cores', 'precision': 3}" +
" ]," +
" 'cluster-policy': [" +
" { 'replica': 0, shard:'#EACH', nodeset :{ port : '8983'}}" +
" ]" +
"}";
}
AutoScalingConfig cfg = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
List<Violation> violations = cfg.getPolicy().createSession(cloudManagerWithData((Map) loadFromResource("testPortSuggestions.json"))).getViolations();
assertEquals(2, violations.size());

View File

@ -53,6 +53,11 @@ import static org.apache.solr.common.util.Utils.MAPOBJBUILDER;
import static org.apache.solr.common.util.Utils.getObjectByPath;
public class TestPolicy2 extends SolrTestCaseJ4 {
boolean useNodeset;
public TestPolicy2(){
useNodeset = true;
}
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public void testEqualOnNonNode() {
@ -60,6 +65,12 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
String autoScalingjson = "{cluster-policy:[" +
" { replica : '<3' , shard : '#EACH', sysprop.zone: [east,west] } ]," +
" 'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
if(useNodeset){
autoScalingjson = "{cluster-policy:[" +
" { replica : '<3' , shard : '#EACH', nodeset:{ sysprop.zone: [east,west] }} ]," +
" 'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
}
Policy policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
Policy.Session session = policy.createSession(createCloudManager(l.get(0), l.get(1)));
List<Violation> violations = session.getViolations();
@ -74,6 +85,11 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
autoScalingjson = "{cluster-policy:[" +
" { replica : '<3' , shard : '#EACH', sysprop.zone: '#EACH' } ]," +
" 'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
if(useNodeset){
autoScalingjson = "{cluster-policy:[" +
" { replica : '<3' , shard : '#EACH', nodeset:{sysprop.zone: '#EACH'} } ]," +
" 'cluster-preferences':[{ minimize : cores},{maximize : freedisk, precision : 50}]}";
}
policy = new Policy((Map<String, Object>) Utils.fromJSONString(autoScalingjson));
session = policy.createSession(createCloudManager(l.get(0), l.get(1)));
violations = session.getViolations();
@ -335,6 +351,22 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
Map<String, Object> m = (Map<String, Object>) loadFromResource("testSysPropSuggestions.json");
Map<String, Object> conf = (Map<String, Object>) getObjectByPath(m, false, "diagnostics/config");
if(useNodeset){
conf = (Map<String, Object>) Utils.fromJSONString("{" +
" 'cluster-preferences':[{" +
" 'minimize':'cores'," +
" 'precision':1}," +
" {" +
" 'maximize':'freedisk'," +
" 'precision':100}," +
" {" +
" 'minimize':'sysLoadAvg'," +
" 'precision':10}]," +
" 'cluster-policy':[{" +
" 'replica':'<3'," +
" 'shard':'#EACH'," +
" nodeset: {'sysprop.zone':['east','west']}}]}");
}
Policy policy = new Policy(conf);
SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);
Policy.Session session = policy.createSession(cloudManagerFromDiagnostics);
@ -361,6 +393,17 @@ public class TestPolicy2 extends SolrTestCaseJ4 {
" {'minimize':'sysLoadAvg','precision':10}]," +
" 'cluster-policy':[" +
"{'replica':'<5','shard':'#EACH','sysprop.zone':['east','west']}]}";
if(useNodeset){
conf = " {" +
" 'cluster-preferences':[{" +
" 'minimize':'cores'," +
" 'precision':1}," +
" {'maximize':'freedisk','precision':100}," +
" {'minimize':'sysLoadAvg','precision':10}]," +
" 'cluster-policy':[" +
"{'replica':'<5','shard':'#EACH', nodeset:{'sysprop.zone':['east','west']}}]}";
}
Map<String, Object> m = (Map<String, Object>) loadFromResource("testSuggestionsRebalanceOnly.json");
SolrCloudManager cloudManagerFromDiagnostics = createCloudManagerFromDiagnostics(m);
AutoScalingConfig autoScalingConfig = new AutoScalingConfig((Map<String, Object>) Utils.fromJSONString(conf));

View File

@ -0,0 +1,26 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.cloud.autoscaling;
public class TestPolicy2Old extends TestPolicy2 {
public TestPolicy2Old(){
super();
useNodeset = false;
}
}

View File

@ -0,0 +1,25 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.cloud.autoscaling;
public class TestPolicyOld extends TestPolicy {
public TestPolicyOld(){
super();
useNodeset = false;
}
}

View File

@ -892,6 +892,8 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
queryWithPreferReplicaTypes(getRandomClient(), "TLOG|PULL", true, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "NRT", false, collectionName);
queryWithPreferReplicaTypes(getRandomClient(), "NRT|PULL", true, collectionName);
CollectionAdminRequest.deleteCollection(collectionName)
.processAndWait(cluster.getSolrClient(), TIMEOUT);
}
private void queryWithPreferReplicaTypes(CloudSolrClient cloudClient,

View File

@ -552,7 +552,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
StreamFactory factory = new StreamFactory()
.withCollectionZkHost(COLLECTIONORALIAS, cluster.getZkServer().getZkAddress())
.withFunctionName("random", RandomStream.class);
.withFunctionName("random", RandomFacadeStream.class);
StreamContext context = new StreamContext();
@ -572,6 +572,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
List<Tuple> tuples2 = getTuples(stream);
assert (tuples2.size() == 1000);
boolean different = false;
for (int i = 0; i < tuples1.size(); i++) {
Tuple tuple1 = tuples1.get(i);
@ -601,6 +602,44 @@ public class StreamExpressionTest extends SolrCloudTestCase {
List<Tuple> tuples3 = getTuples(stream);
assert (tuples3.size() == 1);
//Exercise the DeepRandomStream with higher rows
expression = StreamExpressionParser.parse("random(" + COLLECTIONORALIAS + ", q=\"*:*\", rows=\"10001\", fl=\"id, a_i\")");
stream = factory.constructStream(expression);
stream.setStreamContext(context);
List<Tuple> tuples10 = getTuples(stream);
assert (tuples10.size() == 1000);
expression = StreamExpressionParser.parse("random(" + COLLECTIONORALIAS + ", q=\"*:*\", rows=\"10001\", fl=\"id, a_i\")");
stream = factory.constructStream(expression);
stream.setStreamContext(context);
List<Tuple> tuples11 = getTuples(stream);
assert (tuples11.size() == 1000);
different = false;
for (int i = 0; i < tuples10.size(); i++) {
Tuple tuple1 = tuples10.get(i);
Tuple tuple2 = tuples11.get(i);
if (!tuple1.get("id").equals(tuple2.get(id))) {
different = true;
break;
}
}
assertTrue(different);
Collections.sort(tuples10, new FieldComparator("id", ComparatorOrder.ASCENDING));
Collections.sort(tuples11, new FieldComparator("id", ComparatorOrder.ASCENDING));
for (int i = 0; i < tuples10.size(); i++) {
Tuple tuple1 = tuples10.get(i);
Tuple tuple2 = tuples11.get(i);
if (!tuple1.get("id").equals(tuple2.get(id))) {
assert(tuple1.getLong("id").equals(tuple2.get("a_i")));
}
}
//Exercise the /stream handler
ModifiableSolrParams sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
@ -610,6 +649,13 @@ public class StreamExpressionTest extends SolrCloudTestCase {
List<Tuple> tuples4 = getTuples(solrStream);
assert (tuples4.size() == 1);
sParams = new ModifiableSolrParams(StreamingTest.mapParams(CommonParams.QT, "/stream"));
sParams.add("expr", "random(" + COLLECTIONORALIAS + ", q=\"*:*\", rows=\"10001\", fl=\"id, a_i\")");
jetty = cluster.getJettySolrRunner(0);
solrStream = new SolrStream(jetty.getBaseUrl().toString() + "/collection1", sParams);
tuples4 = getTuples(solrStream);
assert (tuples4.size() == 1000);
} finally {
cache.close();
}

View File

@ -16,17 +16,24 @@
*/
package org.apache.solr.common.cloud;
import java.io.File;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.security.NoSuchAlgorithmException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.AbstractZkTestCase;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.cloud.ZkTestServer;
import org.apache.solr.util.ExternalPaths;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
@ -35,23 +42,28 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SolrZkClientTest extends SolrTestCaseJ4 {
public class SolrZkClientTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final String ROOT = "/";
private static final String PATH = "/collections/collection1";
protected ZkTestServer zkServer;
SolrZkClient aclClient;
SolrZkClient credentialsClient;
SolrZkClient defaultClient;
private CloudSolrClient solrClient;
@Override
public void setUp() throws Exception {
super.setUp();
configureCluster(1)
.addConfig("_default", new File(ExternalPaths.DEFAULT_CONFIGSET).toPath())
.configure();
solrClient = getCloudSolrClient(cluster.getZkServer().getZkAddress());
final String SCHEME = "digest";
final String AUTH = "user:pass";
@ -59,7 +71,7 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
log.info("ZooKeeper dataDir:" + zkDir);
zkServer = new ZkTestServer(zkDir);
zkServer.run();
try (SolrZkClient client = new SolrZkClient(zkServer.getZkHost(), AbstractZkTestCase.TIMEOUT)) {
// Set up chroot
client.makePath("/solr", false, true);
@ -67,7 +79,7 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
defaultClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT);
defaultClient.makePath(PATH, true);
aclClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT) {
@Override
protected ZkACLProvider createZkACLProvider() {
@ -84,7 +96,7 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
};
}
};
credentialsClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT) {
@Override
protected ZkCredentialsProvider createZkCredentialsToAddAutomatically() {
@ -97,17 +109,19 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
}
};
}
@Override
public void tearDown() throws Exception {
aclClient.close();
credentialsClient.close();
defaultClient.close();
zkServer.shutdown();
solrClient.close();
cluster.shutdown();
super.tearDown();
}
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // annotated on: 24-Dec-2018
public void testSimpleUpdateACLs() throws KeeperException, InterruptedException {
@ -131,7 +145,59 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
assertTrue("Default client should read unaffected paths", canRead(defaultClient, ROOT));
assertFalse("Default client should not read secure children", canRead(defaultClient, PATH));
}
@Test
// SOLR-13491
public void testWrappingWatches() throws Exception {
AtomicInteger calls = new AtomicInteger(0);
Watcher watcherA = new Watcher() {
@Override
public void process(WatchedEvent event) {
calls.getAndIncrement();
}
};
Watcher watcherB = new Watcher() {
@Override
public void process(WatchedEvent event) {
calls.getAndDecrement();
}
};
Watcher wrapped1A = defaultClient.wrapWatcher(watcherA);
Watcher wrapped2A = defaultClient.wrapWatcher(watcherA);
Watcher wrappedB = defaultClient.wrapWatcher(watcherB);
assertTrue(wrapped1A.equals(wrapped2A));
assertTrue(wrapped2A.equals(wrapped1A));
assertFalse(wrapped1A.equals(wrappedB));
assertEquals(wrapped1A.hashCode(), wrapped2A.hashCode());
CollectionAdminRequest.createCollection(getSaferTestName(), "_default", 1, 1)
.setMaxShardsPerNode(2)
.process(solrClient);
CollectionAdminRequest.setCollectionProperty(getSaferTestName(),"foo", "bar")
.process(solrClient);
solrClient.getZkStateReader().getZkClient().getData("/collections/" + getSaferTestName() + "/collectionprops.json",wrapped1A, null,true);
solrClient.getZkStateReader().getZkClient().getData("/collections/" + getSaferTestName() + "/collectionprops.json",wrapped2A, null,true);
CollectionAdminRequest.setCollectionProperty(getSaferTestName(),"baz", "bam")
.process(solrClient);
Thread.sleep(1000); // make sure zk client watch has time to be notified.
assertEquals(1, calls.get()); // same wrapped watch set twice, only invoked once
solrClient.getZkStateReader().getZkClient().getData("/collections/" + getSaferTestName() + "/collectionprops.json",wrapped1A, null,true);
solrClient.getZkStateReader().getZkClient().getData("/collections/" + getSaferTestName() + "/collectionprops.json",wrappedB, null,true);
CollectionAdminRequest.setCollectionProperty(getSaferTestName(),"baz", "bang")
.process(solrClient);
Thread.sleep(1000); // make sure zk client watch has time to be notified.
assertEquals(1, calls.get()); // offsetting watches, no change
}
private static boolean canRead(SolrZkClient zkClient, String path) throws KeeperException, InterruptedException {
try {
zkClient.getData(path, null, null, true);
@ -140,7 +206,7 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
return false;
}
}
@Test
public void testCheckInterrupted() {
assertFalse(Thread.currentThread().isInterrupted());
@ -149,6 +215,6 @@ public class SolrZkClientTest extends SolrTestCaseJ4 {
SolrZkClient.checkInterrupted(new InterruptedException());
assertTrue(Thread.currentThread().isInterrupted());
}
}

View File

@ -32,9 +32,7 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.util.ExecutorUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -44,34 +42,26 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final int CLUSTER_SIZE = 4;
private static final ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("backgroundWatchers");
private static final int MAX_WAIT_TIMEOUT = 30;
@BeforeClass
public static void startCluster() throws Exception {
}
@AfterClass
public static void shutdownBackgroundExecutors() {
executor.shutdown();
}
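// The executor is now created per test (in prepareCluster) and torn down with the
// cluster, so background watcher threads cannot leak across test methods.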
private ExecutorService executor = null;
@Before
public void prepareCluster() throws Exception {
configureCluster(CLUSTER_SIZE)
.addConfig("config", getFile("solrj/solr/collection1/conf").toPath())
.configure();
executor = ExecutorUtil.newMDCAwareCachedThreadPool("backgroundWatchers");
}
@After
public void tearDownCluster() throws Exception {
executor.shutdown();
shutdownCluster();
executor = null;
}
private static Future<Boolean> waitInBackground(String collection, long timeout, TimeUnit unit,
private Future<Boolean> waitInBackground(String collection, long timeout, TimeUnit unit,
CollectionStatePredicate predicate) {
return executor.submit(() -> {
try {
@ -83,7 +73,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
});
}
private static void waitFor(String message, long timeout, TimeUnit unit, Callable<Boolean> predicate)
private void waitFor(String message, long timeout, TimeUnit unit, Callable<Boolean> predicate)
throws InterruptedException, ExecutionException {
Future<Boolean> future = executor.submit(() -> {
try {