From 23a747bf27c0fdccd74426de96541f6c549d3c51 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sat, 5 Mar 2016 04:51:14 -0500 Subject: [PATCH 01/11] add END to the end of SimpleText's points data file --- .../lucene/codecs/simpletext/SimpleTextPointWriter.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java index a20e4876350..1554c0c39e8 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java @@ -23,10 +23,10 @@ import java.util.Map; import org.apache.lucene.codecs.PointReader; import org.apache.lucene.codecs.PointWriter; -import org.apache.lucene.index.PointValues.IntersectVisitor; -import org.apache.lucene.index.PointValues.Relation; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.PointValues.IntersectVisitor; +import org.apache.lucene.index.PointValues.Relation; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.BytesRef; @@ -53,6 +53,7 @@ class SimpleTextPointWriter extends PointWriter { final static BytesRef MAX_VALUE = new BytesRef("max value "); final static BytesRef POINT_COUNT = new BytesRef("point count "); final static BytesRef DOC_COUNT = new BytesRef("doc count "); + final static BytesRef END = new BytesRef("END"); private IndexOutput dataOut; final BytesRefBuilder scratch = new BytesRefBuilder(); @@ -210,6 +211,8 @@ class SimpleTextPointWriter extends PointWriter { @Override public void finish() throws IOException { + SimpleTextUtil.write(dataOut, END); + SimpleTextUtil.writeNewline(dataOut); SimpleTextUtil.writeChecksum(dataOut, scratch); } From 9f29e330542524d817c2fa4794af447cf6a1d1d4 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sat, 5 Mar 2016 04:57:14 -0500 Subject: [PATCH 02/11] fix very rare test bug that I somehow hit --- .../org/apache/lucene/facet/range/TestRangeFacetCounts.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java index e7e5d572088..9fde6e3ed9f 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java @@ -421,7 +421,10 @@ public class TestRangeFacetCounts extends FacetTestCase { } boolean minIncl; boolean maxIncl; - if (min == max) { + + // NOTE: max - min >= 0 is here to handle the common overflow case! 
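// Aside (illustrative sketch, not part of the patch): why the guard below is needed.
// With random 64-bit bounds, the width "max - min" can overflow a signed long:
//
//   long min = Long.MIN_VALUE, max = Long.MAX_VALUE;
//   assert max - min == -1;                     // wraps around in two's complement
//   assert !(max - min >= 0 && max - min < 2);  // guard keeps the widest range out of
//                                               // the "at most two values" branch
//
// Without the ">= 0" check, "max - min < 2" would be true for the overflowed
// (negative) width, and the widest possible range would be treated as tiny.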
+ if (max - min >= 0 && max - min < 2) { + // If max == min or max == min+1, we always do inclusive, else we might pass an empty range and hit exc from LongRange's ctor: minIncl = true; maxIncl = true; } else { From cb4a91eb7c61678939cd65d739ed57c4335181a3 Mon Sep 17 00:00:00 2001 From: Varun Thacker Date: Sat, 5 Mar 2016 13:15:19 +0530 Subject: [PATCH 03/11] SOLR-8449: Fix the core restore functionality to allow restoring multiple times on the same core --- solr/CHANGES.txt | 3 + .../org/apache/solr/handler/RestoreCore.java | 6 +- .../apache/solr/handler/TestRestoreCore.java | 57 +++++++++++-------- 3 files changed, 40 insertions(+), 26 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 645533b97e3..6c039b5c131 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -264,6 +264,9 @@ Bug Fixes * SOLR-8779: Fix missing InterruptedException handling in ZkStateReader.java (Varun Thacker) +* SOLR-8449: Fix the core restore functionality to allow restoring multiple times on the same core + (Johannes Brucher, Varun Thacker) + Optimizations ---------------------- * SOLR-7876: Speed up queries and operations that use many terms when timeAllowed has not been diff --git a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java index a6c1da90ec8..9949d3fcf85 100644 --- a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java +++ b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java @@ -19,6 +19,9 @@ package org.apache.solr.handler; import java.lang.invoke.MethodHandles; import java.nio.file.Path; import java.nio.file.Paths; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Locale; import java.util.concurrent.Callable; import java.util.concurrent.Future; @@ -55,7 +58,8 @@ public class RestoreCore implements Callable { private boolean doRestore() throws Exception { Path backupPath = Paths.get(backupLocation).resolve(backupName); - String restoreIndexName = "restore." + backupName; + SimpleDateFormat dateFormat = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT); + String restoreIndexName = "restore." + dateFormat.format(new Date()); String restoreIndexPath = core.getDataDir() + restoreIndexName; Directory restoreIndexDir = null; diff --git a/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java b/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java index b219a8dbb0d..1218783d7b2 100644 --- a/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java +++ b/solr/core/src/test/org/apache/solr/handler/TestRestoreCore.java @@ -138,36 +138,43 @@ public class TestRestoreCore extends SolrJettyTestBase { Thread.sleep(1000); } - //Modify existing index before we call restore. - //Delete a few docs - int numDeletes = TestUtil.nextInt(random(), 1, nDocs); - for(int i=0; i= nTerms) break; - countAcc.incrementCount(arrIdx, 1); - processor.collectFirstPhase(segDoc, arrIdx); + if (arrIdx >= 0) { + if (arrIdx >= nTerms) break; + countAcc.incrementCount(arrIdx, 1); + processor.collectFirstPhase(segDoc, arrIdx); + } delta = 0; } code >>>= 8; diff --git a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java index 7decfce45a4..90c239481bd 100644 --- a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java +++ b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java @@ -215,9 +215,6 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 { List methods = multiValued ? 
multiValuedMethods : singleValuedMethods; List responses = new ArrayList<>(methods.size()); for (String method : methods) { - if (method.equals("uif") && params.get("facet.prefix")!=null) { - continue; // it's not supported there - } if (method.equals("dv")) { params.set("facet.field", "{!key="+facet_field+"}"+facet_field+"_dv"); params.set("facet.method",(String) null); diff --git a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java index 1541a462811..3b99eb88692 100644 --- a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java +++ b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java @@ -512,7 +512,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 { params.set("facet.method", method); } for (String prefix : prefixes) { - if (prefix == null || "uif".equals(method)) {// there is no support + if (prefix == null) { params.remove("facet.prefix"); } else { params.set("facet.prefix", prefix); @@ -2016,16 +2016,6 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 { doFacetPrefix("tt_s1", "{!threads=2}", "", "facet.method","fcs"); // specific number of threads } - /** no prefix for uif */ - @Test(expected=RuntimeException.class) - public void testNOFacetPrefixForUif() { - if (random().nextBoolean()) { - doFacetPrefix("tt_s1", null, "", "facet.method", "uif"); - } else { - doFacetPrefix("t_s", null, "", "facet.method", "uif"); - } - } - @Test @Ignore("SOLR-8466 - facet.method=uif ignores facet.contains") public void testFacetContainsUif() { diff --git a/solr/core/src/test/org/apache/solr/request/TestFaceting.java b/solr/core/src/test/org/apache/solr/request/TestFaceting.java index 16a6b1364db..97dcedfe6a0 100644 --- a/solr/core/src/test/org/apache/solr/request/TestFaceting.java +++ b/solr/core/src/test/org/apache/solr/request/TestFaceting.java @@ -529,12 +529,8 @@ public class TestFaceting extends SolrTestCaseJ4 { } private void assertQforUIF(String message, SolrQueryRequest request, String ... 
tests) { - final String paramString = request.getParamString(); - if (paramString.contains("uif") && paramString.contains("prefix")){ - assertQEx("uif prohibits prefix", "not supported", request, ErrorCode.BAD_REQUEST); - }else{ - assertQ(message,request, tests); - } + // handle any differences for uif here, like skipping unsupported options + assertQ(message,request, tests); } private void add50ocs() { diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java index 7df03f14dcc..83220edc468 100644 --- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java +++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java @@ -644,6 +644,27 @@ public class TestJsonFacets extends SolrTestCaseHS { " } " ); + // test prefix on real multi-valued field + client.testJQ(params(p, "q", "*:*" + , "json.facet", "{" + + " f1:{${terms} type:terms, field:${multi_ss}, prefix:A }" + + ",f2:{${terms} type:terms, field:${multi_ss}, prefix:z }" + + ",f3:{${terms} type:terms, field:${multi_ss}, prefix:aa }" + + ",f4:{${terms} type:terms, field:${multi_ss}, prefix:bb }" + + ",f5:{${terms} type:terms, field:${multi_ss}, prefix:a }" + + ",f6:{${terms} type:terms, field:${multi_ss}, prefix:b }" + + "}" + ) + , "facets=={ 'count':6 " + + ",f1:{buckets:[]}" + + ",f2:{buckets:[]}" + + ",f3:{buckets:[]}" + + ",f4:{buckets:[]}" + + ",f5:{buckets:[ {val:a,count:3} ]}" + + ",f6:{buckets:[ {val:b,count:3} ]}" + + " } " + ); + // // missing // From e2ebbdf638984f76bfb6432bc554368609461955 Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Sun, 6 Mar 2016 08:27:02 -0500 Subject: [PATCH 06/11] LUCENE-7070: insert missing 's' in PointFormat/Reader/Writer --- .../lucene/codecs/lucene50/Lucene50Codec.java | 6 +- .../lucene/codecs/lucene53/Lucene53Codec.java | 8 +-- .../lucene/codecs/lucene54/Lucene54Codec.java | 8 +-- .../simpletext/SimpleTextBKDReader.java | 6 +- .../codecs/simpletext/SimpleTextCodec.java | 8 +-- ...ormat.java => SimpleTextPointsFormat.java} | 16 ++--- ...eader.java => SimpleTextPointsReader.java} | 44 +++++++------- ...riter.java => SimpleTextPointsWriter.java} | 14 ++--- ...t.java => TestSimpleTextPointsFormat.java} | 4 +- .../java/org/apache/lucene/codecs/Codec.java | 2 +- .../org/apache/lucene/codecs/FilterCodec.java | 4 +- .../{PointFormat.java => PointsFormat.java} | 18 +++--- .../{PointReader.java => PointsReader.java} | 6 +- .../{PointWriter.java => PointsWriter.java} | 58 +++++++++---------- .../lucene/codecs/lucene60/Lucene60Codec.java | 6 +- ...tFormat.java => Lucene60PointsFormat.java} | 22 +++---- ...tReader.java => Lucene60PointsReader.java} | 24 ++++---- ...tWriter.java => Lucene60PointsWriter.java} | 38 ++++++------ .../lucene/codecs/lucene60/package-info.java | 4 +- .../org/apache/lucene/index/CheckIndex.java | 6 +- .../org/apache/lucene/index/CodecReader.java | 18 +++--- .../lucene/index/DefaultIndexingChain.java | 22 +++---- .../org/apache/lucene/index/FieldInfo.java | 2 +- .../lucene/index/FilterCodecReader.java | 6 +- .../org/apache/lucene/index/LeafReader.java | 2 +- .../org/apache/lucene/index/MergeState.java | 12 ++-- .../lucene/index/PointValuesWriter.java | 8 +-- .../lucene/index/SegmentCoreReaders.java | 10 ++-- .../apache/lucene/index/SegmentMerger.java | 4 +- .../apache/lucene/index/SegmentReader.java | 8 +-- .../lucene/index/SlowCodecReaderWrapper.java | 8 +-- ...mat.java => TestLucene60PointsFormat.java} | 28 ++++----- .../apache/lucene/index/TestPointValues.java | 
10 ++-- .../lucene/search/TestPointQueries.java | 24 ++++---- .../TestLatLonPointDistanceQuery.java | 24 ++++---- .../apache/lucene/geo3d/TestGeo3DPoint.java | 24 ++++---- .../codecs/asserting/AssertingCodec.java | 8 +-- ...Format.java => AssertingPointsFormat.java} | 46 +++++++-------- .../lucene/codecs/cranky/CrankyCodec.java | 6 +- ...intFormat.java => CrankyPointsFormat.java} | 34 +++++------ ...ase.java => BasePointsFormatTestCase.java} | 6 +- .../org/apache/lucene/index/RandomCodec.java | 24 ++++---- ...at.java => TestAssertingPointsFormat.java} | 6 +- 43 files changed, 321 insertions(+), 321 deletions(-) rename lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/{SimpleTextPointFormat.java => SimpleTextPointsFormat.java} (77%) rename lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/{SimpleTextPointReader.java => SimpleTextPointsReader.java} (92%) rename lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/{SimpleTextPointWriter.java => SimpleTextPointsWriter.java} (94%) rename lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/{TestSimpleTextPointFormat.java => TestSimpleTextPointsFormat.java} (88%) rename lucene/core/src/java/org/apache/lucene/codecs/{PointFormat.java => PointsFormat.java} (87%) rename lucene/core/src/java/org/apache/lucene/codecs/{PointReader.java => PointsReader.java} (89%) rename lucene/core/src/java/org/apache/lucene/codecs/{PointWriter.java => PointsWriter.java} (66%) rename lucene/core/src/java/org/apache/lucene/codecs/lucene60/{Lucene60PointFormat.java => Lucene60PointsFormat.java} (84%) rename lucene/core/src/java/org/apache/lucene/codecs/lucene60/{Lucene60PointReader.java => Lucene60PointsReader.java} (91%) rename lucene/core/src/java/org/apache/lucene/codecs/lucene60/{Lucene60PointWriter.java => Lucene60PointsWriter.java} (86%) rename lucene/core/src/test/org/apache/lucene/codecs/lucene60/{TestLucene60PointFormat.java => TestLucene60PointsFormat.java} (68%) rename lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/{AssertingPointFormat.java => AssertingPointsFormat.java} (85%) rename lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/{CrankyPointFormat.java => CrankyPointsFormat.java} (82%) rename lucene/test-framework/src/java/org/apache/lucene/index/{BasePointFormatTestCase.java => BasePointsFormatTestCase.java} (99%) rename lucene/test-framework/src/test/org/apache/lucene/codecs/asserting/{TestAssertingPointFormat.java => TestAssertingPointsFormat.java} (85%) diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java index 08ea3e9a35d..001439ce14d 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java @@ -21,12 +21,12 @@ import java.util.Objects; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.CompoundFormat; -import org.apache.lucene.codecs.PointFormat; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.SegmentInfoFormat; import org.apache.lucene.codecs.StoredFieldsFormat; @@ -154,8 +154,8 @@ public class 
Lucene50Codec extends Codec { } @Override - public final PointFormat pointFormat() { - return PointFormat.EMPTY; + public final PointsFormat pointsFormat() { + return PointsFormat.EMPTY; } private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50"); diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java index ab90306a020..7630194c3c4 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene53/Lucene53Codec.java @@ -21,12 +21,12 @@ import java.util.Objects; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.CompoundFormat; -import org.apache.lucene.codecs.PointFormat; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.SegmentInfoFormat; import org.apache.lucene.codecs.StoredFieldsFormat; @@ -35,8 +35,8 @@ import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat; import org.apache.lucene.codecs.lucene50.Lucene50FieldInfosFormat; import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat; import org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat; -import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat; import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; @@ -160,8 +160,8 @@ public class Lucene53Codec extends Codec { } @Override - public final PointFormat pointFormat() { - return PointFormat.EMPTY; + public final PointsFormat pointsFormat() { + return PointsFormat.EMPTY; } private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50"); diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java index 62ef89cd958..2dde0cf6d90 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/codecs/lucene54/Lucene54Codec.java @@ -21,12 +21,12 @@ import java.util.Objects; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.CompoundFormat; -import org.apache.lucene.codecs.PointFormat; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.SegmentInfoFormat; import org.apache.lucene.codecs.StoredFieldsFormat; @@ -35,8 +35,8 @@ import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat; import org.apache.lucene.codecs.lucene50.Lucene50FieldInfosFormat; import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat; import 
org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat; -import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat; import org.apache.lucene.codecs.lucene53.Lucene53NormsFormat; import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; @@ -160,8 +160,8 @@ public class Lucene54Codec extends Codec { } @Override - public final PointFormat pointFormat() { - return PointFormat.EMPTY; + public final PointsFormat pointsFormat() { + return PointsFormat.EMPTY; } private final PostingsFormat defaultFormat = PostingsFormat.forName("Lucene50"); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java index 09c40ec36fd..6e2e1ac815a 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java @@ -27,9 +27,9 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.bkd.BKDReader; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.BLOCK_COUNT; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.BLOCK_DOC_ID; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.BLOCK_VALUE; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BLOCK_COUNT; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BLOCK_DOC_ID; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BLOCK_VALUE; class SimpleTextBKDReader extends BKDReader { diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java index 4cec13dc8b8..109fec980ca 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextCodec.java @@ -19,11 +19,11 @@ package org.apache.lucene.codecs.simpletext; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.CompoundFormat; -import org.apache.lucene.codecs.PointFormat; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.SegmentInfoFormat; import org.apache.lucene.codecs.StoredFieldsFormat; @@ -45,7 +45,7 @@ public final class SimpleTextCodec extends Codec { private final LiveDocsFormat liveDocs = new SimpleTextLiveDocsFormat(); private final DocValuesFormat dvFormat = new SimpleTextDocValuesFormat(); private final CompoundFormat compoundFormat = new SimpleTextCompoundFormat(); - private final PointFormat pointFormat = new SimpleTextPointFormat(); + private final PointsFormat pointsFormat = new SimpleTextPointsFormat(); public SimpleTextCodec() { super("SimpleText"); @@ -97,7 +97,7 @@ public final class SimpleTextCodec extends Codec { } @Override - public PointFormat pointFormat() { - return pointFormat; + public PointsFormat pointsFormat() { + return 
pointsFormat; } } diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsFormat.java similarity index 77% rename from lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointFormat.java rename to lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsFormat.java index 87b83104db9..5e3f57ac28c 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointFormat.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsFormat.java @@ -19,9 +19,9 @@ package org.apache.lucene.codecs.simpletext; import java.io.IOException; -import org.apache.lucene.codecs.PointFormat; -import org.apache.lucene.codecs.PointReader; -import org.apache.lucene.codecs.PointWriter; +import org.apache.lucene.codecs.PointsFormat; +import org.apache.lucene.codecs.PointsReader; +import org.apache.lucene.codecs.PointsWriter; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; @@ -33,16 +33,16 @@ import org.apache.lucene.index.SegmentWriteState; * any text editor, and even edit it to alter your index. * * @lucene.experimental */ -public final class SimpleTextPointFormat extends PointFormat { +public final class SimpleTextPointsFormat extends PointsFormat { @Override - public PointWriter fieldsWriter(SegmentWriteState state) throws IOException { - return new SimpleTextPointWriter(state); + public PointsWriter fieldsWriter(SegmentWriteState state) throws IOException { + return new SimpleTextPointsWriter(state); } @Override - public PointReader fieldsReader(SegmentReadState state) throws IOException { - return new SimpleTextPointReader(state); + public PointsReader fieldsReader(SegmentReadState state) throws IOException { + return new SimpleTextPointsReader(state); } /** Extension of points data file */ diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java similarity index 92% rename from lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointReader.java rename to lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java index 05afd93c7d8..1477f17fa0b 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsReader.java @@ -22,7 +22,7 @@ import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; -import org.apache.lucene.codecs.PointReader; +import org.apache.lucene.codecs.PointsReader; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; @@ -37,36 +37,36 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.bkd.BKDReader; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.BLOCK_FP; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.BYTES_PER_DIM; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.DOC_COUNT; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.FIELD_COUNT; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.FIELD_FP; -import static 
org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.FIELD_FP_NAME; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.INDEX_COUNT; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.MAX_LEAF_POINTS; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.MAX_VALUE; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.MIN_VALUE; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.NUM_DIMS; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.POINT_COUNT; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.SPLIT_COUNT; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.SPLIT_DIM; -import static org.apache.lucene.codecs.simpletext.SimpleTextPointWriter.SPLIT_VALUE; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BLOCK_FP; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BYTES_PER_DIM; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.DOC_COUNT; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.FIELD_COUNT; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.FIELD_FP; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.FIELD_FP_NAME; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.INDEX_COUNT; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MAX_LEAF_POINTS; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MAX_VALUE; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.MIN_VALUE; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.NUM_DIMS; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.POINT_COUNT; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_COUNT; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_DIM; +import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.SPLIT_VALUE; -class SimpleTextPointReader extends PointReader { +class SimpleTextPointsReader extends PointsReader { private final IndexInput dataIn; final SegmentReadState readState; final Map readers = new HashMap<>(); final BytesRefBuilder scratch = new BytesRefBuilder(); - public SimpleTextPointReader(SegmentReadState readState) throws IOException { + public SimpleTextPointsReader(SegmentReadState readState) throws IOException { // Initialize readers now: // Read index: Map fieldToFileOffset = new HashMap<>(); - String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointFormat.POINT_INDEX_EXTENSION); + String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointsFormat.POINT_INDEX_EXTENSION); try (ChecksumIndexInput in = readState.directory.openChecksumInput(indexFileName, IOContext.DEFAULT)) { readLine(in); int count = parseInt(FIELD_COUNT); @@ -81,7 +81,7 @@ class SimpleTextPointReader extends PointReader { } boolean success = false; - String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointFormat.POINT_EXTENSION); + String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointsFormat.POINT_EXTENSION); dataIn = readState.directory.openInput(fileName, 
IOContext.DEFAULT); try { for(Map.Entry ent : fieldToFileOffset.entrySet()) { @@ -98,7 +98,7 @@ class SimpleTextPointReader extends PointReader { } private BKDReader initReader(long fp) throws IOException { - // NOTE: matches what writeIndex does in SimpleTextPointWriter + // NOTE: matches what writeIndex does in SimpleTextPointsWriter dataIn.seek(fp); readLine(dataIn); int numDims = parseInt(NUM_DIMS); @@ -231,7 +231,7 @@ class SimpleTextPointReader extends PointReader { @Override public String toString() { - return "SimpleTextPointReader(segment=" + readState.segmentInfo.name + " maxDoc=" + readState.segmentInfo.maxDoc() + ")"; + return "SimpleTextPointsReader(segment=" + readState.segmentInfo.name + " maxDoc=" + readState.segmentInfo.maxDoc() + ")"; } @Override diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java similarity index 94% rename from lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java rename to lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java index 1554c0c39e8..d2b848d262c 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java @@ -21,8 +21,8 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -import org.apache.lucene.codecs.PointReader; -import org.apache.lucene.codecs.PointWriter; +import org.apache.lucene.codecs.PointsReader; +import org.apache.lucene.codecs.PointsWriter; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.PointValues.IntersectVisitor; @@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.bkd.BKDWriter; -class SimpleTextPointWriter extends PointWriter { +class SimpleTextPointsWriter extends PointsWriter { final static BytesRef NUM_DIMS = new BytesRef("num dims "); final static BytesRef BYTES_PER_DIM = new BytesRef("bytes per dim "); @@ -60,14 +60,14 @@ class SimpleTextPointWriter extends PointWriter { final SegmentWriteState writeState; final Map indexFPs = new HashMap<>(); - public SimpleTextPointWriter(SegmentWriteState writeState) throws IOException { - String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextPointFormat.POINT_EXTENSION); + public SimpleTextPointsWriter(SegmentWriteState writeState) throws IOException { + String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextPointsFormat.POINT_EXTENSION); dataOut = writeState.directory.createOutput(fileName, writeState.context); this.writeState = writeState; } @Override - public void writeField(FieldInfo fieldInfo, PointReader values) throws IOException { + public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException { // We use the normal BKDWriter, but subclass to customize how it writes the index and blocks to disk: try (BKDWriter writer = new BKDWriter(writeState.segmentInfo.maxDoc(), @@ -223,7 +223,7 @@ class SimpleTextPointWriter extends PointWriter { dataOut = null; // Write index file - String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextPointFormat.POINT_INDEX_EXTENSION); + String 
fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextPointsFormat.POINT_INDEX_EXTENSION); try (IndexOutput indexOut = writeState.directory.createOutput(fileName, writeState.context)) { int count = indexFPs.size(); write(indexOut, FIELD_COUNT); diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/TestSimpleTextPointFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/TestSimpleTextPointsFormat.java similarity index 88% rename from lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/TestSimpleTextPointFormat.java rename to lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/TestSimpleTextPointsFormat.java index 7637584f3bc..4594caf7b35 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/TestSimpleTextPointFormat.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/simpletext/TestSimpleTextPointsFormat.java @@ -18,12 +18,12 @@ package org.apache.lucene.codecs.simpletext; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.index.BasePointFormatTestCase; +import org.apache.lucene.index.BasePointsFormatTestCase; /** * Tests SimpleText's point format */ -public class TestSimpleTextPointFormat extends BasePointFormatTestCase { +public class TestSimpleTextPointsFormat extends BasePointsFormatTestCase { private final Codec codec = new SimpleTextCodec(); @Override diff --git a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java index a263acdd738..5d704ca017d 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java @@ -109,7 +109,7 @@ public abstract class Codec implements NamedSPILoader.NamedSPI { public abstract CompoundFormat compoundFormat(); /** Encodes/decodes points index */ - public abstract PointFormat pointFormat(); + public abstract PointsFormat pointsFormat(); /** looks up a codec by name */ public static Codec forName(String name) { diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java b/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java index 4fe236ea2cf..9abd8d4f331 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java @@ -105,7 +105,7 @@ public abstract class FilterCodec extends Codec { } @Override - public PointFormat pointFormat() { - return delegate.pointFormat(); + public PointsFormat pointsFormat() { + return delegate.pointsFormat(); } } diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PointFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/PointsFormat.java similarity index 87% rename from lucene/core/src/java/org/apache/lucene/codecs/PointFormat.java rename to lucene/core/src/java/org/apache/lucene/codecs/PointsFormat.java index 964f8f02bfb..e49bf53afc6 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/PointFormat.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/PointsFormat.java @@ -26,16 +26,16 @@ import org.apache.lucene.index.SegmentWriteState; * Encodes/decodes indexed points. * * @lucene.experimental */ -public abstract class PointFormat { +public abstract class PointsFormat { /** * Creates a new point format. 
*/ - protected PointFormat() { + protected PointsFormat() { } /** Writes a new segment */ - public abstract PointWriter fieldsWriter(SegmentWriteState state) throws IOException; + public abstract PointsWriter fieldsWriter(SegmentWriteState state) throws IOException; /** Reads a segment. NOTE: by the time this call * returns, it must hold open any files it will need to @@ -46,18 +46,18 @@ public abstract class PointFormat { * IOExceptions are expected and will automatically cause a retry of the * segment opening logic with the newly revised segments. * */ - public abstract PointReader fieldsReader(SegmentReadState state) throws IOException; + public abstract PointsReader fieldsReader(SegmentReadState state) throws IOException; - /** A {@code PointFormat} that has nothing indexed */ - public static final PointFormat EMPTY = new PointFormat() { + /** A {@code PointsFormat} that has nothing indexed */ + public static final PointsFormat EMPTY = new PointsFormat() { @Override - public PointWriter fieldsWriter(SegmentWriteState state) { + public PointsWriter fieldsWriter(SegmentWriteState state) { throw new UnsupportedOperationException(); } @Override - public PointReader fieldsReader(SegmentReadState state) { - return new PointReader() { + public PointsReader fieldsReader(SegmentReadState state) { + return new PointsReader() { @Override public void close() { } diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PointReader.java b/lucene/core/src/java/org/apache/lucene/codecs/PointsReader.java similarity index 89% rename from lucene/core/src/java/org/apache/lucene/codecs/PointReader.java rename to lucene/core/src/java/org/apache/lucene/codecs/PointsReader.java index 70878409c4d..ab21431cd6f 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/PointReader.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/PointsReader.java @@ -27,10 +27,10 @@ import org.apache.lucene.util.Accountable; * * @lucene.experimental */ -public abstract class PointReader extends PointValues implements Closeable, Accountable { +public abstract class PointsReader extends PointValues implements Closeable, Accountable { /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */ - protected PointReader() {} + protected PointsReader() {} /** * Checks consistency of this reader. @@ -45,7 +45,7 @@ public abstract class PointReader extends PointValues implements Closeable, Acco * Returns an instance optimized for merging. *
<p>
* The default implementation returns {@code this} */ - public PointReader getMergeInstance() throws IOException { + public PointsReader getMergeInstance() throws IOException { return this; } } diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PointWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java similarity index 66% rename from lucene/core/src/java/org/apache/lucene/codecs/PointWriter.java rename to lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java index c2972a8d1d7..56689ecae7c 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/PointWriter.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java @@ -28,57 +28,57 @@ import org.apache.lucene.index.MergeState; * @lucene.experimental */ -public abstract class PointWriter implements Closeable { +public abstract class PointsWriter implements Closeable { /** Sole constructor. (For invocation by subclass * constructors, typically implicit.) */ - protected PointWriter() { + protected PointsWriter() { } /** Write all values contained in the provided reader */ - public abstract void writeField(FieldInfo fieldInfo, PointReader values) throws IOException; + public abstract void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException; /** Default naive merge implementation for one field: it just re-indexes all the values * from the incoming segment. The default codec overrides this for 1D fields and uses * a faster but more complex implementation. */ protected void mergeOneField(MergeState mergeState, FieldInfo fieldInfo) throws IOException { writeField(fieldInfo, - new PointReader() { + new PointsReader() { @Override public void intersect(String fieldName, IntersectVisitor mergedVisitor) throws IOException { if (fieldName.equals(fieldInfo.name) == false) { throw new IllegalArgumentException("field name must match the field being merged"); } - for (int i=0;i readers = new HashMap<>(); /** Sole constructor */ - public Lucene60PointReader(SegmentReadState readState) throws IOException { + public Lucene60PointsReader(SegmentReadState readState) throws IOException { this.readState = readState; String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, - Lucene60PointFormat.INDEX_EXTENSION); + Lucene60PointsFormat.INDEX_EXTENSION); Map fieldToFileOffset = new HashMap<>(); @@ -60,9 +60,9 @@ public class Lucene60PointReader extends PointReader implements Closeable { Throwable priorE = null; try { CodecUtil.checkIndexHeader(indexIn, - Lucene60PointFormat.META_CODEC_NAME, - Lucene60PointFormat.INDEX_VERSION_START, - Lucene60PointFormat.INDEX_VERSION_START, + Lucene60PointsFormat.META_CODEC_NAME, + Lucene60PointsFormat.INDEX_VERSION_START, + Lucene60PointsFormat.INDEX_VERSION_START, readState.segmentInfo.getId(), readState.segmentSuffix); int count = indexIn.readVInt(); @@ -80,15 +80,15 @@ public class Lucene60PointReader extends PointReader implements Closeable { String dataFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, - Lucene60PointFormat.DATA_EXTENSION); + Lucene60PointsFormat.DATA_EXTENSION); boolean success = false; dataIn = readState.directory.openInput(dataFileName, readState.context); try { CodecUtil.checkIndexHeader(dataIn, - Lucene60PointFormat.DATA_CODEC_NAME, - Lucene60PointFormat.DATA_VERSION_START, - Lucene60PointFormat.DATA_VERSION_START, + Lucene60PointsFormat.DATA_CODEC_NAME, + Lucene60PointsFormat.DATA_VERSION_START, + 
Lucene60PointsFormat.DATA_VERSION_START, readState.segmentInfo.getId(), readState.segmentSuffix); diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java similarity index 86% rename from lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointWriter.java rename to lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java index 8a00d4cb687..3d09c456dd0 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointWriter.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60PointsWriter.java @@ -25,8 +25,8 @@ import java.util.List; import java.util.Map; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.codecs.PointReader; -import org.apache.lucene.codecs.PointWriter; +import org.apache.lucene.codecs.PointsReader; +import org.apache.lucene.codecs.PointsWriter; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexFileNames; @@ -40,7 +40,7 @@ import org.apache.lucene.util.bkd.BKDReader; import org.apache.lucene.util.bkd.BKDWriter; /** Writes dimensional values */ -public class Lucene60PointWriter extends PointWriter implements Closeable { +public class Lucene60PointsWriter extends PointsWriter implements Closeable { final IndexOutput dataOut; final Map indexFPs = new HashMap<>(); @@ -50,20 +50,20 @@ public class Lucene60PointWriter extends PointWriter implements Closeable { private boolean finished; /** Full constructor */ - public Lucene60PointWriter(SegmentWriteState writeState, int maxPointsInLeafNode, double maxMBSortInHeap) throws IOException { + public Lucene60PointsWriter(SegmentWriteState writeState, int maxPointsInLeafNode, double maxMBSortInHeap) throws IOException { assert writeState.fieldInfos.hasPointValues(); this.writeState = writeState; this.maxPointsInLeafNode = maxPointsInLeafNode; this.maxMBSortInHeap = maxMBSortInHeap; String dataFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, - Lucene60PointFormat.DATA_EXTENSION); + Lucene60PointsFormat.DATA_EXTENSION); dataOut = writeState.directory.createOutput(dataFileName, writeState.context); boolean success = false; try { CodecUtil.writeIndexHeader(dataOut, - Lucene60PointFormat.DATA_CODEC_NAME, - Lucene60PointFormat.DATA_VERSION_CURRENT, + Lucene60PointsFormat.DATA_CODEC_NAME, + Lucene60PointsFormat.DATA_VERSION_CURRENT, writeState.segmentInfo.getId(), writeState.segmentSuffix); success = true; @@ -75,12 +75,12 @@ public class Lucene60PointWriter extends PointWriter implements Closeable { } /** Uses the defaults values for {@code maxPointsInLeafNode} (1024) and {@code maxMBSortInHeap} (16.0) */ - public Lucene60PointWriter(SegmentWriteState writeState) throws IOException { + public Lucene60PointsWriter(SegmentWriteState writeState) throws IOException { this(writeState, BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP); } @Override - public void writeField(FieldInfo fieldInfo, PointReader values) throws IOException { + public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException { try (BKDWriter writer = new BKDWriter(writeState.segmentInfo.maxDoc(), writeState.directory, @@ -115,14 +115,14 @@ public class Lucene60PointWriter extends PointWriter implements Closeable { @Override public void merge(MergeState mergeState) throws IOException { - for(PointReader 
reader : mergeState.pointReaders) { - if (reader instanceof Lucene60PointReader == false) { + for(PointsReader reader : mergeState.pointsReaders) { + if (reader instanceof Lucene60PointsReader == false) { // We can only bulk merge when all to-be-merged segments use our format: super.merge(mergeState); return; } } - for (PointReader reader : mergeState.pointReaders) { + for (PointsReader reader : mergeState.pointsReaders) { if (reader != null) { reader.checkIntegrity(); } @@ -145,14 +145,14 @@ public class Lucene60PointWriter extends PointWriter implements Closeable { List bkdReaders = new ArrayList<>(); List docMaps = new ArrayList<>(); List docIDBases = new ArrayList<>(); - for(int i=0;i *

  • - * {@link org.apache.lucene.codecs.lucene60.Lucene60PointFormat Point values}. + * {@link org.apache.lucene.codecs.lucene60.Lucene60PointsFormat Point values}. * Optional pair of files, recording dimesionally indexed fields, to enable fast * numeric range filtering and large numeric values like BigInteger and BigDecimal (1D) * and geo shape intersection (2D, 3D). @@ -322,7 +322,7 @@ * Info about what files are live * * - * {@link org.apache.lucene.codecs.lucene60.Lucene60PointFormat Point values} + * {@link org.apache.lucene.codecs.lucene60.Lucene60PointsFormat Point values} * .dii, .dim * Holds indexed points, if any * diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java index 0bfa350eaba..3c437c1c508 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java +++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java @@ -33,9 +33,9 @@ import java.util.Locale; import java.util.Map; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.PointReader; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PointsReader; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.TermVectorsReader; @@ -1687,9 +1687,9 @@ public final class CheckIndex implements Closeable { Status.PointsStatus status = new Status.PointsStatus(); try { if (fieldInfos.hasPointValues()) { - PointReader values = reader.getPointReader(); + PointsReader values = reader.getPointsReader(); if (values == null) { - throw new RuntimeException("there are fields with points, but reader.getPointReader() is null"); + throw new RuntimeException("there are fields with points, but reader.getPointsReader() is null"); } for (FieldInfo fieldInfo : fieldInfos) { if (fieldInfo.getPointDimensionCount() > 0) { diff --git a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java index eb536488696..194acd8edcb 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java @@ -25,10 +25,10 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.lucene.codecs.PointReader; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.codecs.FieldsProducer; import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PointsReader; import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.TermVectorsReader; import org.apache.lucene.util.Accountable; @@ -77,10 +77,10 @@ public abstract class CodecReader extends LeafReader implements Accountable { public abstract FieldsProducer getPostingsReader(); /** - * Expert: retrieve underlying PointReader + * Expert: retrieve underlying PointsReader * @lucene.internal */ - public abstract PointReader getPointReader(); + public abstract PointsReader getPointsReader(); @Override public final void document(int docID, StoredFieldVisitor visitor) throws IOException { @@ -323,8 +323,8 @@ public abstract class CodecReader extends LeafReader implements Accountable { } // points - if (getPointReader() != null) { - ramBytesUsed += getPointReader().ramBytesUsed(); + if (getPointsReader() != null) { + ramBytesUsed += getPointsReader().ramBytesUsed(); } return ramBytesUsed; @@ -359,8 +359,8 @@ public 
abstract class CodecReader extends LeafReader implements Accountable { } // points - if (getPointReader() != null) { - resources.add(Accountables.namedAccountable("points", getPointReader())); + if (getPointsReader() != null) { + resources.add(Accountables.namedAccountable("points", getPointsReader())); } return Collections.unmodifiableList(resources); @@ -394,8 +394,8 @@ public abstract class CodecReader extends LeafReader implements Accountable { } // points - if (getPointReader() != null) { - getPointReader().checkIntegrity(); + if (getPointsReader() != null) { + getPointsReader().checkIntegrity(); } } } diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java index d1a68da78dd..408f0bb5917 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java +++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java @@ -23,12 +23,12 @@ import java.util.HashMap; import java.util.Map; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.codecs.PointFormat; -import org.apache.lucene.codecs.PointWriter; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.NormsConsumer; import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; +import org.apache.lucene.codecs.PointsWriter; import org.apache.lucene.codecs.StoredFieldsWriter; import org.apache.lucene.document.FieldType; import org.apache.lucene.search.similarities.Similarity; @@ -149,7 +149,7 @@ final class DefaultIndexingChain extends DocConsumer { /** Writes all buffered points. */ private void writePoints(SegmentWriteState state) throws IOException { - PointWriter pointWriter = null; + PointsWriter pointsWriter = null; boolean success = false; try { for (int i=0;i Date: Mon, 7 Mar 2016 09:55:39 +0100 Subject: [PATCH 07/11] LUCENE-7066: Optimize PointRangeQuery for the case that all documents have a value and all points from the segment match. --- lucene/CHANGES.txt | 3 + .../apache/lucene/search/PointRangeQuery.java | 145 +++++++++++------- .../lucene/search/TestPointQueries.java | 43 +++++- 3 files changed, 130 insertions(+), 61 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index b6d0bb3d973..c23a3fe08a2 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -135,6 +135,9 @@ Optimizations * LUCENE-7050: TermsQuery is now cached more aggressively by the default query caching policy. (Adrien Grand) +* LUCENE-7066: PointRangeQuery got optimized for the case that all documents + have a value and all points from the segment match. (Adrien Grand) + Changes in Runtime Behavior * LUCENE-6789: IndexSearcher's default Similarity is changed to BM25Similarity. 
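A condensed sketch of what the PointRangeQuery change below does (same variable names as the patch; surrounding scorer plumbing omitted): when every document in the segment has a value for the field and the segment's min/max packed values already lie inside the query box, the scorer can skip the BKD intersect entirely and match all docs.

    boolean allDocsMatch = values.getDocCount(field) == reader.maxDoc();
    if (allDocsMatch) {
      byte[] fieldMin = values.getMinPackedValue(field);
      byte[] fieldMax = values.getMaxPackedValue(field);
      for (int dim = 0; dim < numDims && allDocsMatch; ++dim) {
        int offset = dim * bytesPerDim;
        if (StringHelper.compare(bytesPerDim, packedLower, offset, fieldMin, offset) > 0
            || StringHelper.compare(bytesPerDim, packedUpper, offset, fieldMax, offset) < 0) {
          allDocsMatch = false; // query box does not fully contain this dimension
        }
      }
    }
    DocIdSetIterator iterator = allDocsMatch
        ? DocIdSetIterator.all(reader.maxDoc())                 // O(1) per doc, no tree walk
        : buildMatchingDocIdSet(reader, values, packedLower, packedUpper).iterator();
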
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java index 85c486e7dde..777c1338813 100644 --- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java @@ -125,6 +125,68 @@ public abstract class PointRangeQuery extends Query { return new ConstantScoreWeight(this) { + private DocIdSet buildMatchingDocIdSet(LeafReader reader, PointValues values, + byte[] packedLower, byte[] packedUpper) throws IOException { + DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc()); + + values.intersect(field, + new IntersectVisitor() { + + @Override + public void grow(int count) { + result.grow(count); + } + + @Override + public void visit(int docID) { + result.add(docID); + } + + @Override + public void visit(int docID, byte[] packedValue) { + for(int dim=0;dim 0) { + // Doc's value is too high, in this dimension + return; + } + } + + // Doc is in-bounds + result.add(docID); + } + + @Override + public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { + + boolean crosses = false; + + for(int dim=0;dim 0 || + StringHelper.compare(bytesPerDim, maxPackedValue, offset, packedLower, offset) < 0) { + return Relation.CELL_OUTSIDE_QUERY; + } + + crosses |= StringHelper.compare(bytesPerDim, minPackedValue, offset, packedLower, offset) < 0 || + StringHelper.compare(bytesPerDim, maxPackedValue, offset, packedUpper, offset) > 0; + } + + if (crosses) { + return Relation.CELL_CROSSES_QUERY; + } else { + return Relation.CELL_INSIDE_QUERY; + } + } + }); + return result.build(); + } + @Override public Scorer scorer(LeafReaderContext context) throws IOException { LeafReader reader = context.reader(); @@ -155,67 +217,32 @@ public abstract class PointRangeQuery extends Query { System.arraycopy(upperPoint[dim], 0, packedUpper, dim*bytesPerDim, bytesPerDim); } - // Now packedLowerIncl and packedUpperIncl are inclusive, and non-empty space: + boolean allDocsMatch; + if (values.getDocCount(field) == reader.maxDoc()) { + final byte[] fieldPackedLower = values.getMinPackedValue(field); + final byte[] fieldPackedUpper = values.getMaxPackedValue(field); + allDocsMatch = true; + for (int i = 0; i < numDims; ++i) { + int offset = i * bytesPerDim; + if (StringHelper.compare(bytesPerDim, packedLower, offset, fieldPackedLower, offset) > 0 + || StringHelper.compare(bytesPerDim, packedUpper, offset, fieldPackedUpper, offset) < 0) { + allDocsMatch = false; + break; + } + } + } else { + allDocsMatch = false; + } - DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc()); + DocIdSetIterator iterator; + if (allDocsMatch) { + // all docs have a value and all points are within bounds, so everything matches + iterator = DocIdSetIterator.all(reader.maxDoc()); + } else { + iterator = buildMatchingDocIdSet(reader, values, packedLower, packedUpper).iterator(); + } - values.intersect(field, - new IntersectVisitor() { - - @Override - public void grow(int count) { - result.grow(count); - } - - @Override - public void visit(int docID) { - result.add(docID); - } - - @Override - public void visit(int docID, byte[] packedValue) { - for(int dim=0;dim 0) { - // Doc's value is too high, in this dimension - return; - } - } - - // Doc is in-bounds - result.add(docID); - } - - @Override - public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - - boolean crosses = false; - - for(int dim=0;dim 0 || - StringHelper.compare(bytesPerDim, 
maxPackedValue, offset, packedLower, offset) < 0) { - return Relation.CELL_OUTSIDE_QUERY; - } - - crosses |= StringHelper.compare(bytesPerDim, minPackedValue, offset, packedLower, offset) < 0 || - StringHelper.compare(bytesPerDim, maxPackedValue, offset, packedUpper, offset) > 0; - } - - if (crosses) { - return Relation.CELL_CROSSES_QUERY; - } else { - return Relation.CELL_INSIDE_QUERY; - } - } - }); - - return new ConstantScoreScorer(this, score(), result.build().iterator()); + return new ConstantScoreScorer(this, score(), iterator); } }; } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java index cd0d7196585..5a3483bc30f 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java @@ -53,13 +53,11 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MultiDocValues; -import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; -import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -1847,4 +1845,45 @@ public class TestPointQueries extends LuceneTestCase { // binary assertEquals("bytes:{[12] [2a]}", BinaryPoint.newSetQuery("bytes", new byte[] {42}, new byte[] {18}).toString()); } + + public void testRangeOptimizesIfAllPointsMatch() throws IOException { + final int numDims = TestUtil.nextInt(random(), 1, 3); + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + Document doc = new Document(); + int[] value = new int[numDims]; + for (int i = 0; i < numDims; ++i) { + value[i] = TestUtil.nextInt(random(), 1, 10); + } + doc.add(new IntPoint("point", value)); + w.addDocument(doc); + IndexReader reader = w.getReader(); + IndexSearcher searcher = new IndexSearcher(reader); + searcher.setQueryCache(null); + int[] lowerBound = new int[numDims]; + int[] upperBound = new int[numDims]; + for (int i = 0; i < numDims; ++i) { + lowerBound[i] = value[i] - random().nextInt(1); + upperBound[i] = value[i] + random().nextInt(1); + } + Query query = IntPoint.newRangeQuery("point", lowerBound, upperBound); + Weight weight = searcher.createNormalizedWeight(query, false); + Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0)); + assertEquals(DocIdSetIterator.all(1).getClass(), scorer.iterator().getClass()); + + // When not all documents in the query have a value, the optimization is not applicable + reader.close(); + w.addDocument(new Document()); + w.forceMerge(1); + reader = w.getReader(); + searcher = new IndexSearcher(reader); + searcher.setQueryCache(null); + weight = searcher.createNormalizedWeight(query, false); + scorer = weight.scorer(searcher.getIndexReader().leaves().get(0)); + assertFalse(DocIdSetIterator.all(1).getClass().equals(scorer.iterator().getClass())); + + reader.close(); + w.close(); + dir.close(); + } } From 258d67e974c622b855856a217a35a34378d1dbc6 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Mon, 7 Mar 2016 11:21:12 +0100 Subject: [PATCH 08/11] Update URL to Lucene's 
KEYS file --- dev-tools/scripts/buildAndPushRelease.py | 2 +- lucene/build.xml | 2 +- solr/build.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dev-tools/scripts/buildAndPushRelease.py b/dev-tools/scripts/buildAndPushRelease.py index 8a6d6ba99a3..396a0765253 100644 --- a/dev-tools/scripts/buildAndPushRelease.py +++ b/dev-tools/scripts/buildAndPushRelease.py @@ -187,7 +187,7 @@ def pushLocal(version, root, rev, rcNum, localDir): os.remove('%s/solr/package/solr.tar.bz2' % root) print(' KEYS') - run('wget http://people.apache.org/keys/group/lucene.asc') + run('wget http://home.apache.org/keys/group/lucene.asc') os.rename('lucene.asc', 'KEYS') run('chmod a+r-w KEYS') run('cp KEYS ../lucene') diff --git a/lucene/build.xml b/lucene/build.xml index 117059e51e0..08b2c36370e 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -395,7 +395,7 @@ - diff --git a/solr/build.xml b/solr/build.xml index 1ab1a927986..218bf8ce6d3 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -444,7 +444,7 @@ - From 267e326dbf137de5357e0aca7418f648752cb22a Mon Sep 17 00:00:00 2001 From: Mike McCandless Date: Mon, 7 Mar 2016 05:22:40 -0500 Subject: [PATCH 09/11] LUCENE-7072: always use WGS84 planet model in Geo3DPoint --- lucene/CHANGES.txt | 3 ++ .../apache/lucene/geo3d/BasePlanetObject.java | 5 +++ .../org/apache/lucene/geo3d/Geo3DPoint.java | 43 ++++++++----------- .../lucene/geo3d/PointInGeo3DShapeQuery.java | 30 ++++++------- .../apache/lucene/geo3d/TestGeo3DPoint.java | 25 +++++------ 5 files changed, 52 insertions(+), 54 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index c23a3fe08a2..3647d5d191c 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -118,6 +118,9 @@ API Changes * LUCENE-7064: MultiPhraseQuery is now immutable and should be constructed with MultiPhraseQuery.Builder. (Luc Vanlerberghe via Adrien Grand) +* LUCENE-7072: Geo3DPoint always uses WGS84 planet model. + (Robert Muir, Mike McCandless) + Optimizations * LUCENE-6891: Use prefix coding when writing points in diff --git a/lucene/spatial3d/src/java/org/apache/lucene/geo3d/BasePlanetObject.java b/lucene/spatial3d/src/java/org/apache/lucene/geo3d/BasePlanetObject.java index b5e3d286adb..c64b974fd1f 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/geo3d/BasePlanetObject.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/geo3d/BasePlanetObject.java @@ -34,6 +34,11 @@ public abstract class BasePlanetObject { public BasePlanetObject(final PlanetModel planetModel) { this.planetModel = planetModel; } + + /** Returns the {@link PlanetModel} provided when this shape was created. */ + public PlanetModel getPlanetModel() { + return planetModel; + } @Override public int hashCode() { diff --git a/lucene/spatial3d/src/java/org/apache/lucene/geo3d/Geo3DPoint.java b/lucene/spatial3d/src/java/org/apache/lucene/geo3d/Geo3DPoint.java index fbdb00d7a3c..cde87f3c77b 100644 --- a/lucene/spatial3d/src/java/org/apache/lucene/geo3d/Geo3DPoint.java +++ b/lucene/spatial3d/src/java/org/apache/lucene/geo3d/Geo3DPoint.java @@ -36,8 +36,6 @@ import org.apache.lucene.util.RamUsageEstimator; * @lucene.experimental */ public final class Geo3DPoint extends Field { - private final PlanetModel planetModel; - /** Indexing {@link FieldType}. */ public static final FieldType TYPE = new FieldType(); static { @@ -46,16 +44,15 @@ public final class Geo3DPoint extends Field { } /** - * Creates a new Geo3DPoint field with the specified lat, lon (in radians), given a planet model. 
+ * Creates a new Geo3DPoint field with the specified lat, lon (in radians). * * @throws IllegalArgumentException if the field name is null or lat or lon are out of bounds */ - public Geo3DPoint(String name, PlanetModel planetModel, double lat, double lon) { + public Geo3DPoint(String name, double lat, double lon) { super(name, TYPE); - this.planetModel = planetModel; // Translate lat/lon to x,y,z: - final GeoPoint point = new GeoPoint(planetModel, lat, lon); - fillFieldsData(planetModel, point.x, point.y, point.z); + final GeoPoint point = new GeoPoint(PlanetModel.WGS84, lat, lon); + fillFieldsData(point.x, point.y, point.z); } /** @@ -63,40 +60,38 @@ public final class Geo3DPoint extends Field { * * @throws IllegalArgumentException if the field name is null or lat or lon are out of bounds */ - public Geo3DPoint(String name, PlanetModel planetModel, double x, double y, double z) { + public Geo3DPoint(String name, double x, double y, double z) { super(name, TYPE); - this.planetModel = planetModel; - fillFieldsData(planetModel, x, y, z); + fillFieldsData(x, y, z); } - private void fillFieldsData(PlanetModel planetModel, double x, double y, double z) { + private void fillFieldsData(double x, double y, double z) { byte[] bytes = new byte[12]; - encodeDimension(planetModel, x, bytes, 0); - encodeDimension(planetModel, y, bytes, Integer.BYTES); - encodeDimension(planetModel, z, bytes, 2*Integer.BYTES); + encodeDimension(x, bytes, 0); + encodeDimension(y, bytes, Integer.BYTES); + encodeDimension(z, bytes, 2*Integer.BYTES); fieldsData = new BytesRef(bytes); } // public helper methods (e.g. for queries) /** Encode single dimension */ - public static void encodeDimension(PlanetModel planetModel, double value, byte bytes[], int offset) { - NumericUtils.intToSortableBytes(Geo3DUtil.encodeValue(planetModel.getMaximumMagnitude(), value), bytes, offset); + public static void encodeDimension(double value, byte bytes[], int offset) { + NumericUtils.intToSortableBytes(Geo3DUtil.encodeValue(PlanetModel.WGS84.getMaximumMagnitude(), value), bytes, offset); } /** Decode single dimension */ - public static double decodeDimension(PlanetModel planetModel, byte value[], int offset) { - return Geo3DUtil.decodeValueCenter(planetModel.getMaximumMagnitude(), NumericUtils.sortableBytesToInt(value, offset)); + public static double decodeDimension(byte value[], int offset) { + return Geo3DUtil.decodeValueCenter(PlanetModel.WGS84.getMaximumMagnitude(), NumericUtils.sortableBytesToInt(value, offset)); } /** Returns a query matching all points inside the provided shape. * - * @param planetModel The {@link PlanetModel} to use, which must match what was used during indexing * @param field field name. must not be {@code null}. 
* @param shape Which {@link GeoShape} to match */
-  public static Query newShapeQuery(PlanetModel planetModel, String field, GeoShape shape) {
-    return new PointInGeo3DShapeQuery(planetModel, field, shape);
+  public static Query newShapeQuery(String field, GeoShape shape) {
+    return new PointInGeo3DShapeQuery(field, shape);
   }
 
   @Override
@@ -108,9 +103,9 @@ public final class Geo3DPoint extends Field {
     result.append(':');
 
     BytesRef bytes = (BytesRef) fieldsData;
-    result.append(" x=" + decodeDimension(planetModel, bytes.bytes, bytes.offset));
-    result.append(" y=" + decodeDimension(planetModel, bytes.bytes, bytes.offset + Integer.BYTES));
-    result.append(" z=" + decodeDimension(planetModel, bytes.bytes, bytes.offset + 2*Integer.BYTES));
+    result.append(" x=" + decodeDimension(bytes.bytes, bytes.offset));
+    result.append(" y=" + decodeDimension(bytes.bytes, bytes.offset + Integer.BYTES));
+    result.append(" z=" + decodeDimension(bytes.bytes, bytes.offset + 2*Integer.BYTES));
     result.append('>');
     return result.toString();
   }
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/geo3d/PointInGeo3DShapeQuery.java b/lucene/spatial3d/src/java/org/apache/lucene/geo3d/PointInGeo3DShapeQuery.java
index 4d816963080..9e2132d680d 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/geo3d/PointInGeo3DShapeQuery.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/geo3d/PointInGeo3DShapeQuery.java
@@ -40,14 +40,19 @@ import org.apache.lucene.util.NumericUtils;
 class PointInGeo3DShapeQuery extends Query {
 
   final String field;
-  final PlanetModel planetModel;
   final GeoShape shape;
 
   /** The lats/lons must be clockwise or counter-clockwise. */
-  public PointInGeo3DShapeQuery(PlanetModel planetModel, String field, GeoShape shape) {
+  public PointInGeo3DShapeQuery(String field, GeoShape shape) {
     this.field = field;
-    this.planetModel = planetModel;
     this.shape = shape;
+
+    if (shape instanceof BasePlanetObject) {
+      BasePlanetObject planetObject = (BasePlanetObject) shape;
+      if (planetObject.getPlanetModel().equals(PlanetModel.WGS84) == false) {
+        throw new IllegalArgumentException("this query requires PlanetModel.WGS84, but got: " + planetObject.getPlanetModel());
+      }
+    }
   }
 
   @Override
@@ -88,7 +93,7 @@ class PointInGeo3DShapeQuery extends Query {
         assert xyzSolid.getRelationship(shape) == GeoArea.WITHIN || xyzSolid.getRelationship(shape) == GeoArea.OVERLAPS: "expected WITHIN (1) or OVERLAPS (2) but got " + xyzSolid.getRelationship(shape) + "; shape="+shape+"; XYZSolid="+xyzSolid;
         */
 
-        double planetMax = planetModel.getMaximumMagnitude();
+        double planetMax = PlanetModel.WGS84.getMaximumMagnitude();
 
         DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc());
 
@@ -103,9 +108,9 @@ class PointInGeo3DShapeQuery extends Query {
                              @Override
                              public void visit(int docID, byte[] packedValue) {
                                assert packedValue.length == 12;
-                               double x = Geo3DPoint.decodeDimension(planetModel, packedValue, 0);
-                               double y = Geo3DPoint.decodeDimension(planetModel, packedValue, Integer.BYTES);
-                               double z = Geo3DPoint.decodeDimension(planetModel, packedValue, 2 * Integer.BYTES);
+                               double x = Geo3DPoint.decodeDimension(packedValue, 0);
+                               double y = Geo3DPoint.decodeDimension(packedValue, Integer.BYTES);
+                               double z = Geo3DPoint.decodeDimension(packedValue, 2 * Integer.BYTES);
                                if (shape.isWithin(x, y, z)) {
                                  result.add(docID);
                                }
@@ -129,7 +134,7 @@ class PointInGeo3DShapeQuery extends Query {
                                assert yMin <= yMax;
                                assert zMin <= zMax;
 
-                               GeoArea xyzSolid = GeoAreaFactory.makeGeoArea(planetModel, xMin, xMax, yMin, yMax, zMin, zMax);
+                               GeoArea 
xyzSolid = GeoAreaFactory.makeGeoArea(PlanetModel.WGS84, xMin, xMax, yMin, yMax, zMin, zMax); switch(xyzSolid.getRelationship(shape)) { case GeoArea.CONTAINS: @@ -165,10 +170,6 @@ class PointInGeo3DShapeQuery extends Query { return field; } - public PlanetModel getPlanetModel() { - return planetModel; - } - public GeoShape getShape() { return shape; } @@ -182,13 +183,12 @@ class PointInGeo3DShapeQuery extends Query { PointInGeo3DShapeQuery that = (PointInGeo3DShapeQuery) o; - return planetModel.equals(that.planetModel) && shape.equals(that.shape); + return shape.equals(that.shape); } @Override public final int hashCode() { int result = super.hashCode(); - result = 31 * result + planetModel.hashCode(); result = 31 * result + shape.hashCode(); return result; } @@ -203,8 +203,6 @@ class PointInGeo3DShapeQuery extends Query { sb.append(this.field); sb.append(':'); } - sb.append(" PlanetModel: "); - sb.append(planetModel); sb.append(" Shape: "); sb.append(shape); return sb.toString(); diff --git a/lucene/spatial3d/src/test/org/apache/lucene/geo3d/TestGeo3DPoint.java b/lucene/spatial3d/src/test/org/apache/lucene/geo3d/TestGeo3DPoint.java index 9d00d3e6ccb..17a40755d2f 100644 --- a/lucene/spatial3d/src/test/org/apache/lucene/geo3d/TestGeo3DPoint.java +++ b/lucene/spatial3d/src/test/org/apache/lucene/geo3d/TestGeo3DPoint.java @@ -106,13 +106,12 @@ public class TestGeo3DPoint extends LuceneTestCase { iwc.setCodec(getCodec()); IndexWriter w = new IndexWriter(dir, iwc); Document doc = new Document(); - doc.add(new Geo3DPoint("field", PlanetModel.WGS84, toRadians(50.7345267), toRadians(-97.5303555))); + doc.add(new Geo3DPoint("field", toRadians(50.7345267), toRadians(-97.5303555))); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); // We can't wrap with "exotic" readers because the query must see the BKD3DDVFormat: IndexSearcher s = newSearcher(r, false); - assertEquals(1, s.search(Geo3DPoint.newShapeQuery(PlanetModel.WGS84, - "field", + assertEquals(1, s.search(Geo3DPoint.newShapeQuery("field", GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84, toRadians(50), toRadians(-97), Math.PI/180.)), 1).totalHits); w.close(); r.close(); @@ -640,8 +639,6 @@ public class TestGeo3DPoint extends LuceneTestCase { private static void verify(double[] lats, double[] lons) throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); - PlanetModel planetModel = getPlanetModel(); - // Else we can get O(N^2) merging: int mbd = iwc.getMaxBufferedDocs(); if (mbd != -1 && mbd < lats.length/100) { @@ -662,7 +659,7 @@ public class TestGeo3DPoint extends LuceneTestCase { doc.add(newStringField("id", ""+id, Field.Store.NO)); doc.add(new NumericDocValuesField("id", id)); if (Double.isNaN(lats[id]) == false) { - doc.add(new Geo3DPoint("point", planetModel, lats[id], lons[id])); + doc.add(new Geo3DPoint("point", lats[id], lons[id])); } w.addDocument(doc); if (id > 0 && random().nextInt(100) == 42) { @@ -710,13 +707,13 @@ public class TestGeo3DPoint extends LuceneTestCase { for (int iter=0;iter", point.toString()); + Geo3DPoint point = new Geo3DPoint("point", toRadians(44.244272), toRadians(7.769736)); + assertEquals("Geo3DPoint ", point.toString()); } public void testShapeQueryToString() { - assertEquals("PointInGeo3DShapeQuery: field=point: PlanetModel: PlanetModel.SPHERE Shape: GeoStandardCircle: {planetmodel=PlanetModel.SPHERE, center=[lat=0.3861041107739683, lon=0.06780373760536706], radius=0.1(5.729577951308232)}", - Geo3DPoint.newShapeQuery(PlanetModel.SPHERE, "point", 
GeoCircleFactory.makeGeoCircle(PlanetModel.SPHERE, toRadians(44.244272), toRadians(7.769736), 0.1)).toString()); + assertEquals("PointInGeo3DShapeQuery: field=point: Shape: GeoStandardCircle: {planetmodel=PlanetModel.WGS84, center=[lat=0.3861041107739683, lon=0.06780373760536706], radius=0.1(5.729577951308232)}", + Geo3DPoint.newShapeQuery("point", GeoCircleFactory.makeGeoCircle(PlanetModel.WGS84, toRadians(44.244272), toRadians(7.769736), 0.1)).toString()); } private static Directory getDirectory() { From dd04b6173955d55348b3abaec4c2a3e875e12487 Mon Sep 17 00:00:00 2001 From: Shalin Shekhar Mangar Date: Mon, 7 Mar 2016 15:03:03 +0530 Subject: [PATCH 10/11] SOLR-8745: Deprecate costly ZkStateReader.updateClusterState(), replace with a narrow forceUpdateCollection(collection) (cherry picked from commit 093a8ce) --- solr/CHANGES.txt | 3 ++ .../hadoop/MorphlineGoLiveMiniMRTest.java | 1 - .../apache/solr/cloud/ElectionContext.java | 2 +- .../cloud/LeaderInitiatedRecoveryThread.java | 6 --- .../OverseerCollectionMessageHandler.java | 1 - .../org/apache/solr/cloud/ZkController.java | 2 +- .../solr/handler/CdcrRequestHandler.java | 2 +- .../solr/handler/admin/ClusterStatus.java | 3 -- .../handler/admin/CollectionsHandler.java | 2 - .../handler/admin/CoreAdminOperation.java | 6 +-- .../solr/handler/admin/RebalanceLeaders.java | 2 +- .../solr/cloud/BaseCdcrDistributedZkTest.java | 1 - .../solr/cloud/BasicDistributedZkTest.java | 4 +- .../cloud/ChaosMonkeyNothingIsSafeTest.java | 2 +- .../solr/cloud/ChaosMonkeyShardSplitTest.java | 2 +- .../solr/cloud/CollectionReloadTest.java | 2 +- .../cloud/CollectionTooManyReplicasTest.java | 6 +-- .../CollectionsAPIDistributedZkTest.java | 9 ++-- .../solr/cloud/CustomCollectionTest.java | 1 - .../apache/solr/cloud/DeleteShardTest.java | 2 - .../apache/solr/cloud/ForceLeaderTest.java | 11 ++--- .../apache/solr/cloud/HttpPartitionTest.java | 9 +--- .../LeaderFailoverAfterPartitionTest.java | 2 - .../LeaderInitiatedRecoveryOnCommitTest.java | 4 +- .../solr/cloud/MigrateRouteKeyTest.java | 4 +- .../org/apache/solr/cloud/OverseerTest.java | 12 ++--- .../solr/cloud/ReplicaPropertiesBase.java | 3 -- .../org/apache/solr/cloud/ShardSplitTest.java | 1 - .../org/apache/solr/cloud/SyncSliceTest.java | 1 - .../solr/cloud/TestCloudDeleteByQuery.java | 1 - .../apache/solr/cloud/TestCollectionAPI.java | 4 +- .../TestLeaderInitiatedRecoveryThread.java | 1 - .../solr/cloud/TestMiniSolrCloudCluster.java | 47 +++++++++---------- .../cloud/TestMiniSolrCloudClusterBase.java | 3 +- .../cloud/TestRandomRequestDistribution.java | 4 +- .../solr/cloud/TestRebalanceLeaders.java | 1 - .../solr/cloud/TestReplicaProperties.java | 1 - .../cloud/TestSolrCloudWithKerberosAlt.java | 1 + .../solr/cloud/UnloadDistributedZkTest.java | 4 +- .../apache/solr/cloud/ZkControllerTest.java | 2 +- .../solr/cloud/hdfs/StressHdfsTest.java | 3 +- .../cloud/overseer/ZkStateReaderTest.java | 6 +-- .../cloud/overseer/ZkStateWriterTest.java | 10 ++-- .../solr/common/cloud/ZkStateReader.java | 46 ++++++++++++++++++ .../solr/cloud/AbstractDistribZkTestBase.java | 4 +- .../cloud/AbstractFullDistribZkTestBase.java | 10 ++-- .../org/apache/solr/cloud/ChaosMonkey.java | 14 +----- 47 files changed, 129 insertions(+), 139 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 406776fe936..d7ae2269e24 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -296,6 +296,9 @@ Optimizations * SOLR-8720: ZkController#publishAndWaitForDownStates should use #publishNodeAsDown. 
(Mark Miller) +* SOLR-8745: Deprecate costly ZkStateReader.updateClusterState(), replace with a narrow + forceUpdateCollection(collection) (Scott Blum via shalin) + Other Changes ---------------------- diff --git a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java index 1cc1723db9b..95ed9b2b17d 100644 --- a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java +++ b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java @@ -646,7 +646,6 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { } Thread.sleep(200); - cloudClient.getZkStateReader().updateClusterState(); } if (TEST_NIGHTLY) { diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java index 210787757bb..38f6083bcb6 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java +++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java @@ -462,7 +462,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase { public void publishActiveIfRegisteredAndNotActive(SolrCore core) throws KeeperException, InterruptedException { if (core.getCoreDescriptor().getCloudDescriptor().hasRegistered()) { ZkStateReader zkStateReader = zkController.getZkStateReader(); - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection(collection); ClusterState clusterState = zkStateReader.getClusterState(); Replica rep = (clusterState == null) ? null : clusterState.getReplica(collection, leaderProps.getStr(ZkStateReader.CORE_NODE_NAME_PROP)); diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java b/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java index 7a72a6782ee..589ed83e833 100644 --- a/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java +++ b/solr/core/src/java/org/apache/solr/cloud/LeaderInitiatedRecoveryThread.java @@ -244,12 +244,6 @@ public class LeaderInitiatedRecoveryThread extends Thread { // see if the replica's node is still live, if not, no need to keep doing this loop ZkStateReader zkStateReader = zkController.getZkStateReader(); - try { - zkStateReader.updateClusterState(); - } catch (Exception exc) { - log.warn("Error when updating cluster state: "+exc); - } - if (!zkStateReader.getClusterState().liveNodesContain(replicaNodeName)) { log.warn("Node "+replicaNodeName+" hosting core "+coreNeedingRecovery+ " is no longer live. 
No need to keep trying to tell it to recover!"); diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java index 6b7f6067d8b..d7d894bc69b 100644 --- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java +++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java @@ -1371,7 +1371,6 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler return; } Thread.sleep(1000); - zkStateReader.updateClusterState(); } throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find new slice " + sliceName + " in collection " + collectionName diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java index 7d2752a75ae..81897b717e9 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java +++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java @@ -883,7 +883,7 @@ public final class ZkController { } // make sure we have an update cluster state right away - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection(collection); return shardId; } finally { MDCLoggingContext.clear(); diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java index 585c8396d23..23e4abac304 100644 --- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java @@ -361,7 +361,7 @@ public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAw throws IOException, SolrServerException { ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController(); try { - zkController.getZkStateReader().updateClusterState(); + zkController.getZkStateReader().forceUpdateCollection(collection); } catch (Exception e) { log.warn("Error when updating cluster state", e); } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java b/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java index 667d9fa11f5..ff60adc465b 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/ClusterStatus.java @@ -57,9 +57,6 @@ public class ClusterStatus { @SuppressWarnings("unchecked") public void getClusterStatus(NamedList results) throws KeeperException, InterruptedException { - zkStateReader.updateClusterState(); - - // read aliases Aliases aliases = zkStateReader.getAliases(); Map> collectionVsAliases = new HashMap<>(); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java index de2104f4d07..593dac81bcc 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java @@ -920,8 +920,6 @@ public class CollectionsHandler extends RequestHandlerBase { + (checkLeaderOnly ? 
"leaders" : "replicas")); ZkStateReader zkStateReader = cc.getZkController().getZkStateReader(); for (int i = 0; i < numRetries; i++) { - - zkStateReader.updateClusterState(); ClusterState clusterState = zkStateReader.getClusterState(); Collection shards = clusterState.getSlices(collectionName); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java index 8240189cf04..e755b82ff49 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java @@ -461,6 +461,7 @@ enum CoreAdminOperation { // to accept updates CloudDescriptor cloudDescriptor = core.getCoreDescriptor() .getCloudDescriptor(); + String collection = cloudDescriptor.getCollectionName(); if (retry % 15 == 0) { if (retry > 0 && log.isInfoEnabled()) @@ -470,7 +471,7 @@ enum CoreAdminOperation { waitForState + "; forcing ClusterState update from ZooKeeper"); // force a cluster state update - coreContainer.getZkController().getZkStateReader().updateClusterState(); + coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collection); } if (maxTries == 0) { @@ -483,7 +484,6 @@ enum CoreAdminOperation { } ClusterState clusterState = coreContainer.getZkController().getClusterState(); - String collection = cloudDescriptor.getCollectionName(); Slice slice = clusterState.getSlice(collection, cloudDescriptor.getShardId()); if (slice != null) { final Replica replica = slice.getReplicasMap().get(coreNodeName); @@ -937,4 +937,4 @@ enum CoreAdminOperation { return size; } -} \ No newline at end of file +} diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java index 4626fc92967..98e796da73d 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java @@ -79,7 +79,7 @@ class RebalanceLeaders { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "The " + COLLECTION_PROP + " is required for the Rebalance Leaders command.")); } - coreContainer.getZkController().getZkStateReader().updateClusterState(); + coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName); ClusterState clusterState = coreContainer.getZkController().getClusterState(); DocCollection dc = clusterState.getCollection(collectionName); if (dc == null) { diff --git a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java index f1f3e9167b1..fe94309bba2 100644 --- a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java @@ -635,7 +635,6 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase { try { cloudClient.connect(); ZkStateReader zkStateReader = cloudClient.getZkStateReader(); - zkStateReader.updateClusterState(); ClusterState clusterState = zkStateReader.getClusterState(); DocCollection coll = clusterState.getCollection(collection); diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java index d25ce664809..8222e91677f 100644 --- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java 
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java @@ -552,7 +552,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase { Thread.sleep(5000); ChaosMonkey.start(cloudJettys.get(0).jetty); - cloudClient.getZkStateReader().updateClusterState(); + cloudClient.getZkStateReader().forceUpdateCollection("multiunload2"); try { cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000); } catch (SolrException e) { @@ -830,7 +830,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase { // we added a role of none on these creates - check for it ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader(); - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection(oneInstanceCollection2); Map slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2); assertNotNull(slices); String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP); diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java index 8cc80d9c843..7dceada1668 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java @@ -205,7 +205,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase // TODO: assert we didnt kill everyone - zkStateReader.updateClusterState(); + zkStateReader.updateLiveNodes(); assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0); diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java index 7a44561d38b..190db573a50 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java @@ -206,7 +206,7 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest { for (int i = 0; i < 30; i++) { Thread.sleep(3000); ZkStateReader zkStateReader = cloudClient.getZkStateReader(); - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection("collection1"); ClusterState clusterState = zkStateReader.getClusterState(); DocCollection collection1 = clusterState.getCollection("collection1"); Slice slice = collection1.getSlice("shard1"); diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java index b6eb5e2a494..65ff78bf06a 100644 --- a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java @@ -103,7 +103,7 @@ public class CollectionReloadTest extends AbstractFullDistribZkTestBase { timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutSecs, TimeUnit.SECONDS); while (System.nanoTime() < timeout) { // state of leader should be active after session loss recovery - see SOLR-7338 - cloudClient.getZkStateReader().updateClusterState(); + cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); ClusterState cs = cloudClient.getZkStateReader().getClusterState(); Slice slice = cs.getSlice(testCollectionName, shardId); replicaState = slice.getReplica(leader.getName()).getStr(ZkStateReader.STATE_PROP); diff --git 
a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
index 92fea45e335..afc7c483fb0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
@@ -97,7 +97,7 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
     assertEquals(0, response.getStatus());
 
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState();
+    zkStateReader.forceUpdateCollection(collectionName);
     Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");
 
     Replica rep = null;
@@ -194,7 +194,7 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
     // And finally, ensure that there are all the replicas we expect. We should have shards 1, 2 and 4 and each
     // should have exactly two replicas
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState();
+    zkStateReader.forceUpdateCollection(collectionName);
     Map<String, Slice> slices = zkStateReader.getClusterState().getSlicesMap(collectionName);
     assertEquals("There should be exactly four slices", slices.size(), 4);
     assertNotNull("shardstart should exist", slices.get("shardstart"));
@@ -275,7 +275,7 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
 
   private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState();
+    zkStateReader.forceUpdateCollection(collectionName);
     Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");
 
     List<String> nodes = new ArrayList<>();
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 93f82acf20d..641dadfc236 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -368,7 +368,6 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
       }
 
       Thread.sleep(200);
-      cloudClient.getZkStateReader().updateClusterState();
     }
 
     assertFalse("Still found collection that should be gone", cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2"));
@@ -540,8 +539,6 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
   }
 
   private void testNoCollectionSpecified() throws Exception {
-    
-    cloudClient.getZkStateReader().updateClusterState();
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
 
@@ -565,13 +562,13 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
 
     // in both cases, the collection should have defaulted to the core name
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection");
+    cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection2");
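    // Aside: the pattern this patch migrates callers to, in a nutshell. A minimal
    // sketch (the collection name here is hypothetical); rather than the deprecated,
    // cluster-wide updateClusterState(), refresh only the collection about to be read:
    //
    //   ZkStateReader reader = cloudClient.getZkStateReader();
    //   reader.forceUpdateCollection("mycollection");             // narrow ZK fetch
    //   ClusterState state = reader.getClusterState();            // fresh cached view
    //   DocCollection coll = state.getCollection("mycollection");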
assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
     assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
   }
 
   private void testNoConfigSetExist() throws Exception {
-    cloudClient.getZkStateReader().updateClusterState();
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));
 
     // try and create a SolrCore with no collection name
@@ -592,7 +589,7 @@ public class CollectionsAPIDistributedZkTestBa
     assertTrue(gotExp);
     TimeUnit.MILLISECONDS.sleep(200);
     // in both cases, the collection should have defaulted to the core name
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection3");
     Collection<Slice> slices = cloudClient.getZkStateReader().getClusterState().getActiveSlices("corewithnocollection3");
     int replicaCount = 0;
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
index 081e96f4f08..0951b5d6fc0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
@@ -409,7 +409,6 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
     int attempts = 0;
     while (true) {
       if (attempts > 30) fail("Not enough active replicas in the shard 'x'");
-      zkStateReader.updateClusterState();
       attempts++;
       replicaCount = zkStateReader.getClusterState().getSlice(collectionName, "x").getReplicas().size();
       if (replicaCount >= 1) break;
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
index 101bfb98c20..812fbe93218 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
@@ -96,7 +96,6 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
     ClusterState clusterState = zkStateReader.getClusterState();
     int counter = 10;
     while (counter-- > 0) {
-      zkStateReader.updateClusterState();
       clusterState = zkStateReader.getClusterState();
       if (clusterState.getSlice("collection1", shard) == null) {
         break;
@@ -142,7 +141,6 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
     boolean transition = false;
 
     for (int counter = 10; counter > 0; counter--) {
-      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       State sliceState = clusterState.getSlice("collection1", slice).getState();
       if (sliceState == state) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
index c68fe9c3a26..a71c3e61413 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
@@ -89,7 +89,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
 
       putNonLeadersIntoLIR(testCollectionName, SHARD1, zkController, leader, notLeaders);
 
-      cloudClient.getZkStateReader().updateClusterState();
+      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
       ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
       int numActiveReplicas = getNumberOfActiveReplicas(clusterState, testCollectionName, SHARD1);
       assertEquals("Expected only 0 active replica but found " + numActiveReplicas +
@@ 
-114,7 +114,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
       // By now we have an active leader. Wait for recoveries to begin
       waitForRecoveriesToFinish(testCollectionName, cloudClient.getZkStateReader(), true);
 
-      cloudClient.getZkStateReader().updateClusterState();
+      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
       clusterState = cloudClient.getZkStateReader().getClusterState();
       log.info("After forcing leader: " + clusterState.getSlice(testCollectionName, SHARD1));
       // we have a leader
@@ -187,7 +187,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
         setReplicaState(testCollectionName, SHARD1, rep, State.DOWN);
       }
 
-      zkController.getZkStateReader().updateClusterState();
+      zkController.getZkStateReader().forceUpdateCollection(testCollectionName);
       // Assert all replicas are down and that there is no leader
       assertEquals(0, getActiveOrRecoveringReplicas(testCollectionName, SHARD1).size());
 
@@ -224,7 +224,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
     ClusterState clusterState = null;
     boolean transition = false;
     for (int counter = 10; counter > 0; counter--) {
-      zkStateReader.updateClusterState();
       clusterState = zkStateReader.getClusterState();
       Replica newLeader = clusterState.getSlice(collection, slice).getLeader();
       if (newLeader == null) {
@@ -259,7 +258,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
 
     Replica.State replicaState = null;
     for (int counter = 10; counter > 0; counter--) {
-      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       replicaState = clusterState.getSlice(collection, slice).getReplica(replica.getName()).getState();
       if (replicaState == state) {
@@ -355,7 +353,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
       for (int j = 0; j < notLeaders.size(); j++)
         lirStates[j] = zkController.getLeaderInitiatedRecoveryState(collectionName, shard, notLeaders.get(j).getName());
 
-      zkController.getZkStateReader().updateClusterState();
       ClusterState clusterState = zkController.getZkStateReader().getClusterState();
       boolean allDown = true;
       for (State lirState : lirStates)
@@ -391,7 +388,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
     JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
     leaderJetty.start();
     waitForRecoveriesToFinish(collection, cloudClient.getZkStateReader(), true);
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection(collection);
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     log.info("After bringing back leader: " + clusterState.getSlice(collection, SHARD1));
     int numActiveReplicas = getNumberOfActiveReplicas(clusterState, collection, SHARD1);
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 8fecc84045a..f1960aa952e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -215,7 +215,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 
     // Verify that the partitioned replica is DOWN
     ZkStateReader zkr = cloudClient.getZkStateReader();
-    zkr.updateClusterState(); // force the state to be fresh
+    zkr.forceUpdateCollection(testCollectionName); // force the state to be fresh
     ClusterState cs = zkr.getClusterState();
     Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
     Slice slice = slices.iterator().next();
@@ -645,18 
+645,13 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase { final RTimer timer = new RTimer(); ZkStateReader zkr = cloudClient.getZkStateReader(); - zkr.updateClusterState(); // force the state to be fresh - + zkr.forceUpdateCollection(testCollectionName); ClusterState cs = zkr.getClusterState(); Collection slices = cs.getActiveSlices(testCollectionName); boolean allReplicasUp = false; long waitMs = 0L; long maxWaitMs = maxWaitSecs * 1000L; while (waitMs < maxWaitMs && !allReplicasUp) { - // refresh state every 2 secs - if (waitMs % 2000 == 0) - cloudClient.getZkStateReader().updateClusterState(); - cs = cloudClient.getZkStateReader().getClusterState(); assertNotNull(cs); Slice shard = cs.getSlice(testCollectionName, shardId); diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java index 6fd7c534809..0436d5e874b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java @@ -159,8 +159,6 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest { long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS); while (System.nanoTime() < timeout) { - cloudClient.getZkStateReader().updateClusterState(); - List activeReps = getActiveOrRecoveringReplicas(testCollectionName, "shard1"); if (activeReps.size() >= 2) break; Thread.sleep(1000); diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java index 8d2cc70c786..7d6c633f482 100644 --- a/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/LeaderInitiatedRecoveryOnCommitTest.java @@ -80,7 +80,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest Thread.sleep(sleepMsBeforeHealPartition); - cloudClient.getZkStateReader().updateClusterState(); // get the latest state + cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); // get the latest state leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1"); assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState()); @@ -128,7 +128,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest sendCommitWithRetry(replica); Thread.sleep(sleepMsBeforeHealPartition); - cloudClient.getZkStateReader().updateClusterState(); // get the latest state + cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); // get the latest state leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1"); assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState()); diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java index f9566e30f08..c09e0d1dc6a 100644 --- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java @@ -72,7 +72,7 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest { boolean ruleRemoved = false; long expiryTime = finishTime + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS); while (System.nanoTime() < expiryTime) { - 
getCommonCloudSolrClient().getZkStateReader().updateClusterState(); + getCommonCloudSolrClient().getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION); state = getCommonCloudSolrClient().getZkStateReader().getClusterState(); slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2); Map routingRules = slice.getRoutingRules(); @@ -186,7 +186,7 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest { log.info("Response from target collection: " + response); assertEquals("DocCount on target collection does not match", splitKeyCount[0], response.getResults().getNumFound()); - getCommonCloudSolrClient().getZkStateReader().updateClusterState(); + getCommonCloudSolrClient().getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION); ClusterState state = getCommonCloudSolrClient().getZkStateReader().getClusterState(); Slice slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2); assertNotNull("Routing rule map is null", slice.getRoutingRules()); diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java index 66a214f7fcb..85a88ec3ae9 100644 --- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java @@ -439,7 +439,6 @@ public class OverseerTest extends SolrTestCaseJ4 { int cloudStateSliceCount = 0; for (int i = 0; i < 40; i++) { cloudStateSliceCount = 0; - reader.updateClusterState(); ClusterState state = reader.getClusterState(); final Map slices = state.getSlicesMap(collection); if (slices != null) { @@ -524,7 +523,6 @@ public class OverseerTest extends SolrTestCaseJ4 { private void waitForCollections(ZkStateReader stateReader, String... 
collections) throws InterruptedException, KeeperException { int maxIterations = 100; while (0 < maxIterations--) { - stateReader.updateClusterState(); final ClusterState state = stateReader.getClusterState(); Set availableCollections = state.getCollections(); int availableCount = 0; @@ -605,7 +603,6 @@ public class OverseerTest extends SolrTestCaseJ4 { private void verifyShardLeader(ZkStateReader reader, String collection, String shard, String expectedCore) throws InterruptedException, KeeperException { int maxIterations = 200; while(maxIterations-->0) { - reader.updateClusterState(); // poll state ZkNodeProps props = reader.getClusterState().getLeader(collection, shard); if(props!=null) { if(expectedCore.equals(props.getStr(ZkStateReader.CORE_NAME_PROP))) { @@ -832,7 +829,8 @@ public class OverseerTest extends SolrTestCaseJ4 { killerThread = new Thread(killer); killerThread.start(); - reader = new ZkStateReader(controllerClient); //no watches, we'll poll + reader = new ZkStateReader(controllerClient); + reader.createClusterStateWatchersAndUpdate(); for (int i = 0; i < atLeast(4); i++) { killCounter.incrementAndGet(); //for each round allow 1 kill @@ -905,9 +903,10 @@ public class OverseerTest extends SolrTestCaseJ4 { mockController = new MockZKController(server.getZkAddress(), "node1"); mockController.publishState(collection, "core1", "core_node1", Replica.State.RECOVERING, 1); - while (version == getClusterStateVersion(controllerClient)); + while (version == reader.getClusterState().getZkClusterStateVersion()) { + Thread.sleep(100); + } - reader.updateClusterState(); ClusterState state = reader.getClusterState(); int numFound = 0; @@ -1048,7 +1047,6 @@ public class OverseerTest extends SolrTestCaseJ4 { assertTrue(overseers.size() > 0); while (true) { - reader.updateClusterState(); ClusterState state = reader.getClusterState(); if (state.hasCollection("perf_sentinel")) { break; diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java index 8347af09372..fe83a8431a3 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java +++ b/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java @@ -56,7 +56,6 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas ClusterState clusterState = null; Replica replica = null; for (int idx = 0; idx < 300; ++idx) { - client.getZkStateReader().updateClusterState(); clusterState = client.getZkStateReader().getClusterState(); replica = clusterState.getReplica(collectionName, replicaName); if (replica == null) { @@ -82,7 +81,6 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas ClusterState clusterState = null; for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds. 
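      // The forced per-iteration refresh removed below is no longer needed: the
      // reader's ZK watches keep its cached state current, so each pass can simply
      // re-read getClusterState(). A raw reader must have its watches initialized
      // once before such polling; a sketch (zkClient is whatever SolrZkClient the
      // caller already holds):
      //
      //   ZkStateReader reader = new ZkStateReader(zkClient);
      //   reader.createClusterStateWatchersAndUpdate();
      //   ClusterState state = reader.getClusterState(); // cached, watch-refreshed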
- client.getZkStateReader().updateClusterState(); clusterState = client.getZkStateReader().getClusterState(); replica = clusterState.getReplica(collectionName, replicaName); if (replica == null) { @@ -116,7 +114,6 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas DocCollection col = null; for (int idx = 0; idx < 300; ++idx) { - client.getZkStateReader().updateClusterState(); ClusterState clusterState = client.getZkStateReader().getClusterState(); col = clusterState.getCollection(collectionName); diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java index 22735abdb25..6d4b9cc3b8f 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java @@ -416,7 +416,6 @@ public class ShardSplitTest extends BasicDistributedZkTest { int i = 0; for (i = 0; i < 10; i++) { ZkStateReader zkStateReader = cloudClient.getZkStateReader(); - zkStateReader.updateClusterState(); clusterState = zkStateReader.getClusterState(); slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0"); slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1"); diff --git a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java index e753be9f459..362009e684b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java @@ -218,7 +218,6 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase { for (int i = 0; i < 60; i++) { Thread.sleep(3000); ZkStateReader zkStateReader = cloudClient.getZkStateReader(); - zkStateReader.updateClusterState(); ClusterState clusterState = zkStateReader.getClusterState(); DocCollection collection1 = clusterState.getCollection("collection1"); Slice slice = collection1.getSlice("shard1"); diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java index a0bb42a3ee6..f4436eb9e65 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java @@ -119,7 +119,6 @@ public class TestCloudDeleteByQuery extends SolrCloudTestCase { String nodeKey = jettyURL.getHost() + ":" + jettyURL.getPort() + jettyURL.getPath().replace("/","_"); urlMap.put(nodeKey, jettyURL.toString()); } - zkStateReader.updateClusterState(); ClusterState clusterState = zkStateReader.getClusterState(); for (Slice slice : clusterState.getSlices(COLLECTION_NAME)) { String shardName = slice.getName(); diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java index b203f02c877..45b6f733bec 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java @@ -625,7 +625,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase { .setCollectionName("testClusterStateMigration") .process(client); - client.getZkStateReader().updateClusterState(); + client.getZkStateReader().forceUpdateCollection("testClusterStateMigration"); assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat()); @@ -735,7 +735,7 @@ public class 
TestCollectionAPI extends ReplicaPropertiesBase { private Map getProps(CloudSolrClient client, String collectionName, String replicaName, String... props) throws KeeperException, InterruptedException { - client.getZkStateReader().updateClusterState(); + client.getZkStateReader().forceUpdateCollection(collectionName); ClusterState clusterState = client.getZkStateReader().getClusterState(); Replica replica = clusterState.getReplica(collectionName, replicaName); if (replica == null) { diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java b/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java index f2c58cf808a..11858f828b7 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestLeaderInitiatedRecoveryThread.java @@ -175,7 +175,6 @@ public class TestLeaderInitiatedRecoveryThread extends AbstractFullDistribZkTest timeOut = new TimeOut(30, TimeUnit.SECONDS); while (!timeOut.hasTimedOut()) { - cloudClient.getZkStateReader().updateClusterState(); Replica r = cloudClient.getZkStateReader().getClusterState().getReplica(DEFAULT_COLLECTION, replica.getName()); if (r.getState() == Replica.State.DOWN) { break; diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java index 9be89190d42..880051b1f83 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudCluster.java @@ -176,7 +176,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase { assertEquals(1, rsp.getResults().getNumFound()); // remove a server not hosting any replicas - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection(collectionName); ClusterState clusterState = zkStateReader.getClusterState(); HashMap jettyMap = new HashMap(); for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) { @@ -321,7 +321,8 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase { try (SolrZkClient zkClient = new SolrZkClient (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null); ZkStateReader zkStateReader = new ZkStateReader(zkClient)) { - + zkStateReader.createClusterStateWatchersAndUpdate(); + // wait for collection to appear AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330); @@ -368,6 +369,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase { try (SolrZkClient zkClient = new SolrZkClient (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null); ZkStateReader zkStateReader = new ZkStateReader(zkClient)) { + zkStateReader.createClusterStateWatchersAndUpdate(); AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330); // modify collection @@ -385,7 +387,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase { } // the test itself - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection(collectionName); final ClusterState clusterState = zkStateReader.getClusterState(); final HashSet leaderIndices = new HashSet(); @@ -444,7 +446,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase { } AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330); - zkStateReader.updateClusterState(); + 
zkStateReader.forceUpdateCollection(collectionName); // re-query collection { @@ -489,32 +491,29 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase { } } - try (SolrZkClient zkClient = new SolrZkClient - (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, 45000, null); - ZkStateReader zkStateReader = new ZkStateReader(zkClient)) { - AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330); + ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); + AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330); - // add some documents, then optimize to get merged-sorted segments - tstes.addDocuments(cloudSolrClient, 10, 10, true); + // add some documents, then optimize to get merged-sorted segments + tstes.addDocuments(cloudSolrClient, 10, 10, true); - // CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent - tstes.queryTimestampDescending(cloudSolrClient); + // CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent + tstes.queryTimestampDescending(cloudSolrClient); - // add a few more documents, but don't optimize to have some not-merge-sorted segments - tstes.addDocuments(cloudSolrClient, 2, 10, false); + // add a few more documents, but don't optimize to have some not-merge-sorted segments + tstes.addDocuments(cloudSolrClient, 2, 10, false); - // CommonParams.SEGMENT_TERMINATE_EARLY parameter now present - tstes.queryTimestampDescendingSegmentTerminateEarlyYes(cloudSolrClient); - tstes.queryTimestampDescendingSegmentTerminateEarlyNo(cloudSolrClient); + // CommonParams.SEGMENT_TERMINATE_EARLY parameter now present + tstes.queryTimestampDescendingSegmentTerminateEarlyYes(cloudSolrClient); + tstes.queryTimestampDescendingSegmentTerminateEarlyNo(cloudSolrClient); - // CommonParams.SEGMENT_TERMINATE_EARLY parameter present but it won't be used - tstes.queryTimestampDescendingSegmentTerminateEarlyYesGrouped(cloudSolrClient); - tstes.queryTimestampAscendingSegmentTerminateEarlyYes(cloudSolrClient); // uses a sort order that is _not_ compatible with the merge sort order + // CommonParams.SEGMENT_TERMINATE_EARLY parameter present but it won't be used + tstes.queryTimestampDescendingSegmentTerminateEarlyYesGrouped(cloudSolrClient); + tstes.queryTimestampAscendingSegmentTerminateEarlyYes(cloudSolrClient); // uses a sort order that is _not_ compatible with the merge sort order - // delete the collection we created earlier - miniCluster.deleteCollection(collectionName); - AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330); - } + // delete the collection we created earlier + miniCluster.deleteCollection(collectionName); + AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330); } finally { miniCluster.shutdown(); diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java index 54b21dff5de..18285617d9a 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java @@ -146,6 +146,7 @@ public class TestMiniSolrCloudClusterBase extends LuceneTestCase { try (SolrZkClient zkClient = new SolrZkClient (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null); ZkStateReader zkStateReader = new 
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java
index 54b21dff5de..18285617d9a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterBase.java
@@ -146,6 +146,7 @@ public class TestMiniSolrCloudClusterBase extends LuceneTestCase {
     try (SolrZkClient zkClient = new SolrZkClient
         (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
         ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
+      zkStateReader.createClusterStateWatchersAndUpdate();
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
 
       // modify/query collection
@@ -160,7 +161,7 @@ public class TestMiniSolrCloudClusterBase extends LuceneTestCase {
       assertEquals(1, rsp.getResults().getNumFound());
 
       // remove a server not hosting any replicas
-      zkStateReader.updateClusterState();
+      zkStateReader.forceUpdateCollection(collectionName);
       ClusterState clusterState = zkStateReader.getClusterState();
       HashMap jettyMap = new HashMap();
       for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
index 25ffe842569..256774d08c3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
@@ -88,7 +88,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
     waitForRecoveriesToFinish("a1x2", true);
     waitForRecoveriesToFinish("b1x1", true);
 
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection("b1x1");
 
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
 
     DocCollection b1x1 = clusterState.getCollection("b1x1");
@@ -137,7 +137,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
 
     waitForRecoveriesToFinish("football", true);
 
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection("football");
 
     Replica leader = null;
     Replica notLeader = null;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java b/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
index 3c720bfeddf..9208229976a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
@@ -310,7 +310,6 @@ public class TestRebalanceLeaders extends AbstractFullDistribZkTestBase {
     TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS);
     while (! timeout.hasTimedOut()) {
       goAgain = false;
-      cloudClient.getZkStateReader().updateClusterState();
       Map slices = cloudClient.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
       for (Map.Entry ent : expected.entrySet()) {
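The TestRebalanceLeaders loop above drops the per-iteration forced refresh: the reader's ZK watchers keep ClusterState current, so re-reading getClusterState() inside a TimeOut loop is enough. A sketch of that wait pattern, assuming an initialized reader; the predicate is illustrative:

    import java.util.concurrent.TimeUnit;
    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.ZkStateReader;
    import org.apache.solr.util.TimeOut;

    class WaitForCollectionSketch {
      // Poll the watcher-maintained state instead of forcing a refresh each pass.
      static boolean waitForCollection(ZkStateReader reader, String collection) throws InterruptedException {
        TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
        while (!timeout.hasTimedOut()) {
          ClusterState state = reader.getClusterState();
          if (state != null && state.hasCollection(collection)) {
            return true;
          }
          Thread.sleep(100); // back off; watchers update the state asynchronously
        }
        return false;
      }
    }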
lastFailMsg = ""; - client.getZkStateReader().updateClusterState(); ClusterState clusterState = client.getZkStateReader().getClusterState(); for (Slice slice : clusterState.getSlices(collectionName)) { Boolean foundLeader = false; diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java index 4d3ee30ad69..f4dc97de95b 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithKerberosAlt.java @@ -205,6 +205,7 @@ public class TestSolrCloudWithKerberosAlt extends LuceneTestCase { try (SolrZkClient zkClient = new SolrZkClient (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null); ZkStateReader zkStateReader = new ZkStateReader(zkClient)) { + zkStateReader.createClusterStateWatchersAndUpdate(); AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330); // modify/query collection diff --git a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java index dd337fb8530..7d53feebf73 100644 --- a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java @@ -187,7 +187,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest { } ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader(); - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection("unloadcollection"); int slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size(); assertEquals(1, slices); @@ -203,7 +203,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest { createCmd.setDataDir(getDataDir(core2dataDir)); adminClient.request(createCmd); } - zkStateReader.updateClusterState(); + zkStateReader.forceUpdateCollection("unloadcollection"); slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size(); assertEquals(1, slices); diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java index cffbb543e49..7b293ca5ea6 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java @@ -296,7 +296,7 @@ public class ZkControllerTest extends SolrTestCaseJ4 { byte[] bytes = Utils.toJSON(state); zkController.getZkClient().makePath(ZkStateReader.getCollectionPath("testPublishAndWaitForDownStates"), bytes, CreateMode.PERSISTENT, true); - zkController.getZkStateReader().updateClusterState(); + zkController.getZkStateReader().forceUpdateCollection("testPublishAndWaitForDownStates"); assertTrue(zkController.getZkStateReader().getClusterState().hasCollection("testPublishAndWaitForDownStates")); assertNotNull(zkController.getZkStateReader().getClusterState().getCollection("testPublishAndWaitForDownStates")); diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java index 445c4b8f615..601f4fe723a 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java @@ -154,7 +154,7 @@ public class StressHdfsTest extends BasicDistributedZkTest 
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
index 445c4b8f615..601f4fe723a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
@@ -154,7 +154,7 @@ public class StressHdfsTest extends BasicDistributedZkTest
 {
     waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
     cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
 
     for (int i = 1; i < nShards + 1; i++) {
       cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
@@ -211,7 +211,6 @@ public class StressHdfsTest extends BasicDistributedZkTest
       {
       }
       Thread.sleep(200);
-      cloudClient.getZkStateReader().updateClusterState();
     }
 
     // check that all dirs are gone
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
index 69626b0828b..10cc46c5165 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
@@ -94,7 +94,7 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
       assertFalse(exists);
 
       if (explicitRefresh) {
-        reader.updateClusterState();
+        reader.forceUpdateCollection("c1");
       } else {
         for (int i = 0; i < 100; ++i) {
           if (reader.getClusterState().hasCollection("c1")) {
@@ -122,7 +122,7 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
       assertTrue(exists);
 
       if (explicitRefresh) {
-        reader.updateClusterState();
+        reader.forceUpdateCollection("c1");
       } else {
         for (int i = 0; i < 100; ++i) {
           if (reader.getClusterState().getCollection("c1").getStateFormat() == 2) {
@@ -167,7 +167,7 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
           new DocCollection("c1", new HashMap(), new HashMap(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json"));
       writer.enqueueUpdate(reader.getClusterState(), c1, null);
       writer.writePendingUpdates();
-      reader.updateClusterState();
+      reader.forceUpdateCollection("c1");
 
       assertTrue(reader.getClusterState().getCollectionRef("c1").isLazilyLoaded());
       reader.addCollectionWatch("c1");
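The ZkStateReaderTest changes above exercise both state formats: a stateFormat=1 collection lives in the shared clusterstate.json and is held directly, while a stateFormat=2 collection has its own state.json and sits behind a lazy CollectionRef that is fetched on demand. A small sketch of telling the two apart, assuming an initialized reader; the semantics are inferred from the test:

    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.ZkStateReader;

    class StateFormatSketch {
      static void describe(ZkStateReader reader, String collection) {
        ClusterState.CollectionRef ref = reader.getClusterState().getCollectionRef(collection);
        if (ref == null) {
          System.out.println(collection + ": unknown to this reader");
        } else if (ref.isLazilyLoaded()) {
          // stateFormat=2: resolving the ref may read state.json from ZK
          DocCollection doc = ref.get();
          System.out.println(collection + ": lazy, stateFormat=" + (doc == null ? "?" : doc.getStateFormat()));
        } else {
          System.out.println(collection + ": held directly in the cluster state");
        }
      }
    }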
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
index 8e7b0098121..f5648bf148c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
@@ -233,7 +233,8 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
       writer.enqueueUpdate(reader.getClusterState(), c1, null);
       writer.writePendingUpdates();
 
-      reader.updateClusterState();
+      reader.forceUpdateCollection("c1");
+      reader.forceUpdateCollection("c2");
       ClusterState clusterState = reader.getClusterState(); // keep a reference to the current cluster state object
       assertTrue(clusterState.hasCollection("c1"));
       assertFalse(clusterState.hasCollection("c2"));
@@ -257,7 +258,6 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
         // expected
       }
 
-      reader.updateClusterState();
       try {
         writer.enqueueUpdate(reader.getClusterState(), c2, null);
         fail("enqueueUpdate after BadVersionException should not have suceeded");
@@ -317,7 +317,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
       zkClient.setData(ZkStateReader.getCollectionPath("c2"), data, true);
 
       // get the most up-to-date state
-      reader.updateClusterState();
+      reader.forceUpdateCollection("c2");
       state = reader.getClusterState();
       assertTrue(state.hasCollection("c2"));
       assertEquals(sharedClusterStateVersion, (int) state.getZkClusterStateVersion());
@@ -328,7 +328,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
       assertTrue(writer.hasPendingUpdates());
 
       // get the most up-to-date state
-      reader.updateClusterState();
+      reader.forceUpdateCollection("c2");
       state = reader.getClusterState();
 
       // enqueue a stateFormat=1 collection which should cause a flush
@@ -336,7 +336,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
           new DocCollection("c1", new HashMap(), new HashMap(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
 
       try {
-        state = writer.enqueueUpdate(state, c1, null);
+        writer.enqueueUpdate(state, c1, null);
         fail("Enqueue should not have succeeded");
       } catch (KeeperException.BadVersionException bve) {
         // expected
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 3dbc6d2876d..308b3e000a5 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -226,7 +226,10 @@ public class ZkStateReader implements Closeable {
 
   /**
    * Forcibly refresh cluster state from ZK. Do this only to avoid race conditions because it's expensive.
+   *
+   * @deprecated Don't call this, call {@link #forceUpdateCollection(String)} on a single collection if you must.
    */
+  @Deprecated
   public void updateClusterState() throws KeeperException, InterruptedException {
     synchronized (getUpdateLock()) {
       if (clusterState == null) {
@@ -248,6 +251,49 @@ public class ZkStateReader implements Closeable {
     }
   }
 
+  /**
+   * Forcibly refresh a collection's internal state from ZK. Try to avoid having to resort to this when
+   * a better design is possible.
+   */
+  public void forceUpdateCollection(String collection) throws KeeperException, InterruptedException {
+    synchronized (getUpdateLock()) {
+      if (clusterState == null) {
+        return;
+      }
+
+      ClusterState.CollectionRef ref = clusterState.getCollectionRef(collection);
+      if (ref == null) {
+        // We don't know anything about this collection, maybe it's new?
+        // First try to update the legacy cluster state.
+        refreshLegacyClusterState(null);
+        if (!legacyCollectionStates.containsKey(collection)) {
+          // No dice, see if a new collection just got created.
+          LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection);
+          if (tryLazyCollection.get() == null) {
+            // No dice, just give up.
+            return;
+          }
+          // What do you know, it exists!
+          lazyCollectionStates.putIfAbsent(collection, tryLazyCollection);
+        }
+      } else if (ref.isLazilyLoaded()) {
+        if (ref.get() != null) {
+          return;
+        }
+        // Edge case: if there's no external collection, try refreshing legacy cluster state in case it's there.
+        refreshLegacyClusterState(null);
+      } else if (legacyCollectionStates.containsKey(collection)) {
+        // Exists, and lives in legacy cluster state, force a refresh.
+        refreshLegacyClusterState(null);
+      } else if (watchedCollectionStates.containsKey(collection)) {
+        // Exists as a watched collection, force a refresh.
+        DocCollection newState = fetchCollectionState(collection, null);
+        updateWatchedCollection(collection, newState);
+      }
+      constructState();
+    }
+  }
+
   /** Refresh the set of live nodes. */
   public void updateLiveNodes() throws KeeperException, InterruptedException {
     refreshLiveNodes(null);
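For callers migrating off the whole-cluster refresh deprecated above, the replacement is one call per collection actually needed. A minimal before/after sketch; the collection name is illustrative:

    import org.apache.solr.common.cloud.ZkStateReader;

    class MigrationSketch {
      static void refresh(ZkStateReader reader) throws Exception {
        // Before: reader.updateClusterState() re-read every collection from ZK.
        // After: refresh only the collection this caller cares about and let
        // the watcher machinery keep everything else current.
        reader.forceUpdateCollection("collection1");
      }
    }

Note that, as written above, forceUpdateCollection() returns quietly when the reader has no cluster state yet or when the collection cannot be found in the legacy, watched, or lazy buckets.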
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index ff423826080..7b3617ba86c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -145,7 +145,6 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     while (cont) {
       if (verbose) System.out.println("-");
       boolean sawLiveRecovering = false;
-      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       Map slices = clusterState.getSlicesMap(collection);
       assertNotNull("Could not find collection:" + collection, slices);
@@ -195,7 +194,6 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
 
     while (cont) {
       if (verbose) System.out.println("-");
-      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       if (!clusterState.hasCollection(collection)) break;
       if (cnt == timeoutSeconds) {
@@ -239,7 +237,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
 
   protected void assertAllActive(String collection,ZkStateReader zkStateReader)
       throws KeeperException, InterruptedException {
-    zkStateReader.updateClusterState();
+    zkStateReader.forceUpdateCollection(collection);
     ClusterState clusterState = zkStateReader.getClusterState();
     Map slices = clusterState.getSlicesMap(collection);
     if (slices == null) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index bf8f643656b..a584dbd450b 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -626,7 +626,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   protected void updateMappingsFromZk(List jettys, List clients, boolean allowOverSharding) throws Exception {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    zkStateReader.updateClusterState();
+    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
     cloudJettys.clear();
     shardToJetty.clear();
 
@@ -1814,7 +1814,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     Map notLeaders = new HashMap<>();
 
     ZkStateReader zkr = cloudClient.getZkStateReader();
-    zkr.updateClusterState(); // force the state to be fresh
+    zkr.forceUpdateCollection(testCollectionName); // force the state to be fresh
 
     ClusterState cs = zkr.getClusterState();
     Collection slices = cs.getActiveSlices(testCollectionName);
@@ -1824,10 +1824,6 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     long maxWaitMs = maxWaitSecs * 1000L;
     Replica leader = null;
     while (waitMs < maxWaitMs && !allReplicasUp) {
-      // refresh state every 2 secs
-      if (waitMs % 2000 == 0)
-        cloudClient.getZkStateReader().updateClusterState();
-
       cs = cloudClient.getZkStateReader().getClusterState();
       assertNotNull(cs);
       Slice shard = cs.getSlice(testCollectionName, shardId);
@@ -1879,7 +1875,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   }
 
   protected String printClusterStateInfo(String collection) throws Exception {
-    cloudClient.getZkStateReader().updateClusterState();
+    cloudClient.getZkStateReader().forceUpdateCollection(collection);
     String cs = null;
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     if (collection != null) {
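The @@ -1824 hunk deletes the "refresh state every 2 secs" throttle: with watcher-driven updates, reading getClusterState() on every pass costs nothing extra. A sketch of the simplified replica check the loop is left with, assuming the test-base fields; the names are illustrative:

    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;
    import org.apache.solr.common.cloud.ZkStateReader;

    class ReplicaWaitSketch {
      // Count a shard's active replicas from the watcher-maintained state.
      static int activeReplicas(ZkStateReader reader, String collection, String shardId) {
        ClusterState cs = reader.getClusterState();
        Slice shard = cs.getSlice(collection, shardId);
        if (shard == null) {
          return 0;
        }
        int active = 0;
        for (Replica replica : shard.getReplicas()) {
          if (replica.getState() == Replica.State.ACTIVE) {
            active++;
          }
        }
        return active;
      }
    }

A production-strength check would also verify each replica's node appears in getClusterState().getLiveNodes(), since a replica can still be marked ACTIVE on a node that just died.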
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
index d13d62f0683..511fdf34b15 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
@@ -425,7 +425,7 @@ public class ChaosMonkey {
 
     for (CloudJettyRunner cloudJetty : shardToJetty.get(slice)) {
       // get latest cloud state
-      zkStateReader.updateClusterState();
+      zkStateReader.forceUpdateCollection(collection);
 
       Slice theShards = zkStateReader.getClusterState().getSlicesMap(collection)
           .get(slice);
@@ -447,18 +447,6 @@ public class ChaosMonkey {
     return numActive;
   }
 
-  public SolrClient getRandomClient(String slice) throws KeeperException, InterruptedException {
-    // get latest cloud state
-    zkStateReader.updateClusterState();
-
-    // get random shard
-    List clients = shardToClient.get(slice);
-    int index = LuceneTestCase.random().nextInt(clients.size() - 1);
-    SolrClient client = clients.get(index);
-
-    return client;
-  }
-
   // synchronously starts and stops shards randomly, unless there is only one
   // active shard up for a slice or if there is one active and others recovering
   public void startTheMonkey(boolean killLeaders, final int roundPauseUpperLimit) {

From 5429356fc4ba021d2afe7766b832ca0dc93a0d57 Mon Sep 17 00:00:00 2001
From: Mike McCandless
Date: Mon, 7 Mar 2016 08:38:38 -0500
Subject: [PATCH 11/11] make test less evil

---
 .../src/test/org/apache/lucene/search/TestPointQueries.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
index 5a3483bc30f..500bb8fd25f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
@@ -351,12 +351,12 @@ public class TestPointQueries extends LuceneTestCase {
 
   @Nightly
   public void testRandomLongsBig() throws Exception {
-    doTestRandomLongs(200000);
+    doTestRandomLongs(100000);
   }
 
   private void doTestRandomLongs(int count) throws Exception {
-    int numValues = atLeast(count);
+    int numValues = TestUtil.nextInt(random(), count, count*2);
 
     if (VERBOSE) {
       System.out.println("TEST: numValues=" + numValues);
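The final hunk tames the nightly test in two ways: the base count is halved, and atLeast(count), whose result scales with the randomized-test multiplier and so has no fixed upper bound, is replaced by TestUtil.nextInt(random(), count, count*2), which is hard-capped at twice the base. A rough sketch of the difference; the atLeast arithmetic is approximate, recalled from LuceneTestCase rather than quoted from it:

    import java.util.Random;

    class TestSizeSketch {
      // Approximately what atLeast(n) does: the floor grows with the test
      // multiplier (and nightly runs), so the result is unbounded above.
      static int atLeastLike(Random r, int n, int multiplier) {
        int min = n * multiplier;
        int max = min + min / 2;
        return min + r.nextInt(max - min + 1); // in [min, 1.5 * min]
      }

      // The patched approach: uniform in [n, 2n] no matter the multiplier.
      static int boundedLike(Random r, int n) {
        return n + r.nextInt(n + 1);
      }
    }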